5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
26 from bisect import insort, bisect_left, bisect_right
# Commonly used command paths.
# PFCMD (pfexec) is prepended to zfs/zpool invocations that modify state
# (snapshot, destroy, hold, release, set, inherit) to elevate privileges.
PFCMD = "/usr/bin/pfexec"
# Read-only queries below invoke ZFSCMD/ZPOOLCMD directly.
ZFSCMD = "/usr/sbin/zfs"
ZPOOLCMD = "/usr/sbin/zpool"
class Datasets(Exception):
    """
    Container class for all zfs datasets. Maintains a centralised
    list of datasets (generated on demand) and accessor methods.
    Also allows clients to notify when a refresh might be necessary.
    """
    # NOTE(review): this excerpt appears truncated -- the class wide
    # "filesystems"/"volumes"/"snapshots" cache attributes referenced by
    # the methods below are not visible here, and several statements are
    # missing inside method bodies (marked below), so some code is
    # syntactically incomplete as shown.
    # NOTE(review): inheriting from Exception looks accidental for a
    # container class -- confirm before changing; callers may rely on it.

    # Class wide instead of per-instance in order to avoid duplication
    # Mutex locks to prevent concurrent writes to above class wide
    # cached lists.
    _filesystemslock = threading.Lock()
    _volumeslock = threading.Lock()
    snapshotslock = threading.Lock()

    def create_auto_snapshot_set(self, label, tag = None):
        """
        Create a complete set of snapshots as if this were
        for a standard zfs-auto-snapshot operation.

        Keyword arguments:
        label:
            A label to apply to the snapshot name. Cannot be None.
        tag:
            A string indicating one of the standard auto-snapshot schedules
            tags to check (eg. "frequent" for will map to the tag:
            com.sun:auto-snapshot:frequent). If specified as a zfs property
            on a zfs dataset, the property corresponding to the tag will
            override the wildcard property: "com.sun:auto-snapshot"
        """
        # NOTE(review): initialisation of the local lists used below
        # (everything/included/excluded/single/recursive/finalrecursive)
        # and the "if tag:" guard around the override query appear to be
        # elided from this excerpt.
        # Get auto-snap property in two passes. First with the schedule
        # specific tag override value, then with the general property value
        cmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
               "-o", "name,com.sun:auto-snapshot", "-s", "name"]
        overrideprop = "com.sun:auto-snapshot:" + tag
        scmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
                "-o", "name," + overrideprop, "-s", "name"]
        outdata,errdata = util.run_command(scmd)
        for line in outdata.rstrip().split('\n'):
            # NOTE(review): the split of `line` into [name, value] fields
            # and the `if` branch matching the orphan `elif` below appear
            # to be elided here.
            # Skip over unset values.
            # Add to everything list. This is used later
            # for identifying parents/children of a given
            # filesystem or volume.
            everything.append(line[0])
            included.append(line[0])
            elif line[1] == "false":
                excluded.append(line[0])
        # Now use the general property. If no value
        # was set in the first pass, we set it here.
        outdata,errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            idx = bisect_right(everything, line[0])
            if len(everything) == 0 or \
               everything[idx-1] != line[0]:
                # Dataset is neither included nor excluded so far
                everything.insert(idx, line[0])
                if line[1] == "true":
                    included.insert(0, line[0])
                elif line[1] == "false":
                    excluded.append(line[0])
        # Now figure out what can be recursively snapshotted and what
        # must be singly snapshotted. Single snapshot restrictions apply
        # to those datasets who have a child in the excluded list.
        # 'included' is sorted in reverse alphabetical order.
        for datasetname in included:
            excludedchild = False
            idx = bisect_right(everything, datasetname)
            children = [name for name in everything[idx:] if \
                        name.find(datasetname) == 0]
            for child in children:
                idx = bisect_left(excluded, child)
                # NOTE(review): the guard against an empty `excluded` list
                # and the `excludedchild = True` assignment appear elided.
                if excluded[idx] == child:
                    single.append(datasetname)
            if excludedchild == False:
                # We want recursive list sorted in alphabetical order
                # so insert instead of append to the list.
                recursive.insert(0, datasetname)

        for datasetname in recursive:
            parts = datasetname.rsplit('/', 1)
            # NOTE(review): `parent = parts[0]` appears elided here.
            if parent == datasetname:
                # Root filesystem of the Zpool, so
                # this can't be inherited and must be
                # snapshotted directly.
                finalrecursive.append(datasetname)
            idx = bisect_right(recursive, parent)
            if len(recursive) > 0 and \
               recursive[idx-1] == parent:
                # Parent already marked for recursive snapshot: so skip
                # NOTE(review): a `continue`/`else:` around this append
                # appears elided -- as shown this contradicts the comment.
                finalrecursive.append(datasetname)

        for name in finalrecursive:
            dataset = ReadWritableDataset(name)
            dataset.create_snapshot(label, True)
            # NOTE(review): the `for name in single:` loop header for the
            # non-recursive snapshots appears elided here.
            dataset = ReadWritableDataset(name)
            dataset.create_snapshot(label, False)

    def list_auto_snapshot_sets(self, tag = None):
        """
        Returns a list of zfs filesystems and volumes tagged with
        the "com.sun:auto-snapshot" property set to "true", either
        set locally or inherited. Snapshots are excluded from the
        returned result.

        Keyword arguments:
        tag:
            A string indicating one of the standard auto-snapshot schedules
            tags to check (eg. "frequent" will map to the tag:
            com.sun:auto-snapshot:frequent). If specified as a zfs property
            on a zfs dataset, the property corresponding to the tag will
            override the wildcard property: "com.sun:auto-snapshot"
        """
        # NOTE(review): initialisation of `included`/`excluded`, the
        # "if tag:" guard, the per-line field split, the try/except
        # ValueError scaffolding around the .index() probes and the final
        # return all appear to be elided from this excerpt.
        #Get auto-snap property in two passes. First with the global
        #value, then overriding with the label/schedule specific value
        cmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
               "-o", "name,com.sun:auto-snapshot", "-s", "name"]
        overrideprop = "com.sun:auto-snapshot:" + tag
        scmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
                "-o", "name," + overrideprop, "-s", "name"]
        outdata,errdata = util.run_command(scmd)
        for line in outdata.rstrip().split('\n'):
            if line[1] == "true":
                included.append(line[0])
            elif line[1] == "false":
                excluded.append(line[0])
        outdata,errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            # Only set values that aren't already set. Don't override
            # values from the first (schedule-specific) pass.
            included.index(line[0])
            excluded.index(line[0])
            # Dataset is not listed in either list.
            if line[1] == "true":
                included.append(line[0])

    def list_filesystems(self, pattern = None):
        """
        List pattern matching filesystems sorted by name.

        Keyword arguments:
        pattern -- Filter according to pattern (default None)
        """
        # Need to first ensure no other thread is trying to
        # build this list at the same time.
        Datasets._filesystemslock.acquire()
        if Datasets.filesystems == None:
            Datasets.filesystems = []
            cmd = [ZFSCMD, "list", "-H", "-t", "filesystem", \
                  "-o", "name,mountpoint", "-s", "name"]
            # NOTE(review): the try: statement, the closing arguments of
            # the Popen call, the p.wait() exit-status capture and the
            # error-check `if` appear to be elided from this excerpt.
            p = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
            outdata,errdata = p.communicate()
            except OSError, message:
            raise RuntimeError, "%s subprocess error:\n %s" % \
            Datasets._filesystemslock.release()
            raise RuntimeError, '%s failed with exit code %d\n%s' % \
                                (str(cmd), err, errdata)
            for line in outdata.rstrip().split('\n'):
                line = line.rstrip().split()
                Datasets.filesystems.append([line[0], line[1]])
        Datasets._filesystemslock.release()
        # NOTE(review): the `if pattern == None:`/`else:` branching around
        # the copy vs. filtered rebuild appears elided here.
        filesystems = Datasets.filesystems[:]
        # Regular expression pattern to match "pattern" parameter.
        regexpattern = ".*%s.*" % pattern
        patternobj = re.compile(regexpattern)
        for fsname,fsmountpoint in Datasets.filesystems:
            patternmatchobj = re.match(patternobj, fsname)
            if patternmatchobj != None:
                # NOTE(review): list.append takes a single argument;
                # this should presumably be
                # filesystems.append([fsname, fsmountpoint]) -- confirm.
                filesystems.append(fsname, fsmountpoint)

    def list_volumes(self, pattern = None):
        """
        List pattern matching volumes sorted by name.

        Keyword arguments:
        pattern -- Filter according to pattern (default None)
        """
        Datasets._volumeslock.acquire()
        if Datasets.volumes == None:
            Datasets.volumes = []
            cmd = [ZFSCMD, "list", "-H", "-t", "volume", \
                  "-o", "name", "-s", "name"]
            # NOTE(review): try/Popen-close/err-check scaffolding appears
            # elided, as in list_filesystems() above.
            p = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
            outdata,errdata = p.communicate()
            except OSError, message:
            raise RuntimeError, "%s subprocess error:\n %s" % \
            Datasets._volumeslock.release()
            raise RuntimeError, '%s failed with exit code %d\n%s' % \
                                (str(cmd), err, errdata)
            for line in outdata.rstrip().split('\n'):
                Datasets.volumes.append(line.rstrip())
        Datasets._volumeslock.release()
        # NOTE(review): `if pattern == None:`/`else:` branching and the
        # final return appear elided here.
        volumes = Datasets.volumes[:]
        # Regular expression pattern to match "pattern" parameter.
        regexpattern = ".*%s.*" % pattern
        patternobj = re.compile(regexpattern)
        for volname in Datasets.volumes:
            patternmatchobj = re.match(patternobj, volname)
            if patternmatchobj != None:
                volumes.append(volname)

    def list_snapshots(self, pattern = None):
        """
        List pattern matching snapshots sorted by creation date.

        Keyword arguments:
        pattern -- Filter according to pattern (default None)
        """
        Datasets.snapshotslock.acquire()
        if Datasets.snapshots == None:
            Datasets.snapshots = []
            # `zfs get -p creation` emits "<numeric-time>\t<name>" pairs.
            cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value,name", "creation"]
            # NOTE(review): `snaps = []`, try/Popen-close/err-check
            # scaffolding and the `for snap in snaps:` loop header above
            # the append appear to be elided from this excerpt.
            p = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
            outdata,errdata = p.communicate()
            except OSError, message:
                Datasets.snapshotslock.release()
                raise RuntimeError, "%s subprocess error:\n %s" % \
            Datasets.snapshotslock.release()
            raise RuntimeError, '%s failed with exit code %d\n%s' % \
                                (str(cmd), err, errdata)
            for dataset in outdata.rstrip().split('\n'):
                if re.search("@", dataset):
                    # insort keeps `snaps` ordered by creation time.
                    insort(snaps, dataset.split())
                    Datasets.snapshots.append([snap[1], long(snap[0])])
        # NOTE(review): `if pattern == None:`/`else:` branching appears
        # elided around the copy vs. filtered rebuild below.
        snapshots = Datasets.snapshots[:]
        # Regular expression pattern to match "pattern" parameter.
        regexpattern = ".*@.*%s" % pattern
        patternobj = re.compile(regexpattern)
        for snapname,snaptime in Datasets.snapshots:
            patternmatchobj = re.match(patternobj, snapname)
            if patternmatchobj != None:
                snapshots.append([snapname, snaptime])
        Datasets.snapshotslock.release()

    def list_cloned_snapshots(self):
        """
        Returns a list of snapshots that have cloned filesystems
        Snapshots with cloned filesystems can not be destroyed
        unless dependent cloned filesystems are first destroyed.
        """
        cmd = [ZFSCMD, "list", "-H", "-o", "origin"]
        outdata,errdata = util.run_command(cmd)
        # NOTE(review): `result = []`, the try/except ValueError
        # de-duplication scaffolding around .index() and the final return
        # appear to be elided from this excerpt.
        for line in outdata.rstrip().split('\n'):
            details = line.rstrip()
            result.index(details)
            result.append(details)

    def list_held_snapshots(self):
        """
        Returns a list of snapshots that have a "userrefs"
        property value of greater than 0. Result list is
        sorted in order of creation time. Oldest listed first.
        """
        # NOTE(review): additional cmd arguments (presumably
        # "-t snapshot" and a creation sort), `result = []` and the final
        # return appear to be elided from this excerpt.
        cmd = [ZFSCMD, "list", "-H",
               "-o", "userrefs,name"]
        outdata,errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            details = line.split()
            if details[0] != "0":
                result.append(details[1])

    def refresh_snapshots(self):
        """
        Should be called when snapshots have been created or deleted
        and a rescan should be performed. Rescan gets deferred until
        next invocation of zfs.Dataset.list_snapshots()
        """
        # This is a little sub-optimal because we should be able to modify
        # the snapshot list in place in some situations and regenerate the
        # snapshot list without calling out to zfs(1m). But on the
        # pro side, we will pick up any new snapshots since the last
        # scan that we would be otherwise unaware of.
        Datasets.snapshotslock.acquire()
        Datasets.snapshots = None
        Datasets.snapshotslock.release()
    """
    Base class for ZFS storage pool objects
    """
    # NOTE(review): the enclosing `class ZPool:` header appears to be
    # elided from this excerpt.
    def __init__(self, name):
        # NOTE(review): `self.name = name` appears elided; __get_health()
        # below reads self.name.
        self.health = self.__get_health()
        # Shared Datasets accessor plus lazily-built per-pool caches.
        self.__datasets = Datasets()
        self.__filesystems = None
        self.__volumes = None
        self.__snapshots = None
    def __get_health(self):
        """
        Returns pool health status: 'ONLINE', 'DEGRADED' or 'FAULTED'
        """
        cmd = [ZPOOLCMD, "list", "-H", "-o", "health", self.name]
        outdata,errdata = util.run_command(cmd)
        # NOTE(review): `return result` appears elided from this excerpt.
        result = outdata.rstrip()
    def get_capacity(self):
        """
        Returns the percentage of total pool storage in use.
        Calculated based on the "used" and "available" properties
        of the pool's top-level filesystem because the values account
        for reservations and quotas of children in their calculations,
        giving a more practical indication of how much capacity is used

        Raises ZPoolFaultedError if the pool health is FAULTED.
        """
        if self.health == "FAULTED":
            # NOTE(review): the message lacks a space before "because",
            # and the `% (...)` argument line appears elided here.
            raise ZPoolFaultedError("Can not determine capacity of zpool: %s" \
                                    "because it is in a FAULTED state" \
        # -p gives raw (parseable) byte values, one per line.
        cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", \
              "used,available", self.name]
        outdata,errdata = util.run_command(cmd)
        _used,_available = outdata.rstrip().split('\n')
        # NOTE(review): `used = float(_used)` appears elided here.
        available = float(_available)
        return 100.0 * used/(used + available)
    def get_available_size(self):
        """
        How much unused space is available for use on this Zpool.
        """
        # zpool(1) doesn't report available space in
        # units suitable for calculations but zfs(1)
        # can so use it to find the value for the
        # filesystem matching the pool.
        # The root filesystem of the pool is simply
        # the pool name.
        poolfs = Filesystem(self.name)
        # NOTE(review): `return avail` appears elided from this excerpt.
        avail = poolfs.get_available_size()
    def get_used_size(self):
        """
        How much space is in use on this Zpool.

        Raises ZPoolFaultedError if the pool health is FAULTED.
        """
        # Same as ZPool.get_available_size(): zpool(1)
        # doesn't generate suitable output so use
        # zfs(1) on the toplevel filesystem
        if self.health == "FAULTED":
            # NOTE(review): the `% (...)` argument line appears elided, and
            # the message lacks a space before "because".
            raise ZPoolFaultedError("Can not determine used size of zpool: %s" \
                                    "because it is in a FAULTED state" \
        poolfs = Filesystem(self.name)
        # NOTE(review): `return used` appears elided from this excerpt.
        used = poolfs.get_used_size()
    def list_filesystems(self):
        """
        Return a list of filesystems on this Zpool.
        List is sorted by name.
        """
        if self.__filesystems == None:
            # NOTE(review): `result = []` appears elided here.
            # Provides pre-sorted filesystem list
            for fsname,fsmountpoint in self.__datasets.list_filesystems():
                # Keep only datasets whose name starts with this pool name.
                if re.match(self.name, fsname):
                    result.append([fsname, fsmountpoint])
            self.__filesystems = result
        return self.__filesystems
    def list_volumes(self):
        """
        Return a list of volumes (zvol) on this Zpool
        List is sorted by name
        """
        if self.__volumes == None:
            # NOTE(review): `result = []` appears elided here.
            regexpattern = "^%s" % self.name
            patternobj = re.compile(regexpattern)
            for volname in self.__datasets.list_volumes():
                patternmatchobj = re.match(patternobj, volname)
                if patternmatchobj != None:
                    result.append(volname)
            self.__volumes = result
        return self.__volumes
    def list_auto_snapshot_sets(self, tag = None):
        """
        Returns a list of zfs filesystems and volumes tagged with
        the "com.sun:auto-snapshot" property set to "true", either
        set locally or inherited. Snapshots are excluded from the
        returned result. Results are not sorted.

        Keyword arguments:
        tag:
            A string indicating one of the standard auto-snapshot schedules
            tags to check (eg. "frequent" will map to the tag:
            com.sun:auto-snapshot:frequent). If specified as a zfs property
            on a zfs dataset, the property corresponding to the tag will
            override the wildcard property: "com.sun:auto-snapshot"
        """
        # NOTE(review): `result = []`, the early `return result` under the
        # empty-set check, and the final return appear elided from this
        # excerpt.
        allsets = self.__datasets.list_auto_snapshot_sets(tag)
        if len(allsets) == 0:
        regexpattern = "^%s" % self.name
        patternobj = re.compile(regexpattern)
        for datasetname in allsets:
            patternmatchobj = re.match(patternobj, datasetname)
            if patternmatchobj != None:
                result.append(datasetname)
    def list_snapshots(self, pattern = None):
        """
        List pattern matching snapshots sorted by creation date.

        Keyword arguments:
        pattern -- Filter according to pattern (default None)
        """
        # If there isn't a list of snapshots for this dataset
        # already, create it now and store it in order to save
        # time later for potential future invocations.
        Datasets.snapshotslock.acquire()
        # Invalidate the per-pool cache if the global cache was flushed.
        if Datasets.snapshots == None:
            self.__snapshots = None
        Datasets.snapshotslock.release()
        if self.__snapshots == None:
            # NOTE(review): `result = []` appears elided here.
            regexpattern = "^%s.*@" % self.name
            patternobj = re.compile(regexpattern)
            for snapname,snaptime in self.__datasets.list_snapshots():
                patternmatchobj = re.match(patternobj, snapname)
                if patternmatchobj != None:
                    result.append([snapname, snaptime])
            # Results already sorted by creation time
            self.__snapshots = result
        # NOTE(review): the `if pattern == None:`/`else:` branching,
        # `snapshots = []` and the final `return snapshots` appear elided
        # from this excerpt.
            return self.__snapshots
        regexpattern = "^%s.*@.*%s" % (self.name, pattern)
        patternobj = re.compile(regexpattern)
        for snapname,snaptime in self.__snapshots:
            patternmatchobj = re.match(patternobj, snapname)
            if patternmatchobj != None:
                snapshots.append([snapname, snaptime])
    # NOTE(review): the enclosing `def __str__(self):` header, the `try:`
    # wrapping the size queries, the "\n\tUsed: "/"\n\tCapacity: " label
    # fragments, the handler body and the final return all appear elided
    # from this excerpt. BYTESPERMB is presumably a module-level constant
    # -- not visible here; confirm.
    return_string = "ZPool name: " + self.name
    return_string = return_string + "\n\tHealth: " + self.health
    return_string = return_string + \
                    str(self.get_used_size()/BYTESPERMB) + "Mb"
    return_string = return_string + \
                    "\n\tAvailable: " + \
                    str(self.get_available_size()/BYTESPERMB) + "Mb"
    return_string = return_string + \
                    str(self.get_capacity()) + "%"
    except ZPoolFaultedError:
class ReadableDataset:
    """
    Base class for Filesystem, Volume and Snapshot classes
    Provides methods for read only operations common to all.
    """
    def __init__(self, name, creation = None):
        # NOTE(review): `self.name = name` appears elided from this
        # excerpt; the methods below read self.name.
        self.__creationTime = creation
        self.datasets = Datasets()
        self.__used_size = None

    # NOTE(review): the enclosing `def __str__(self):` header and its
    # return appear elided from this excerpt.
    return_string = "ReadableDataset name: " + self.name + "\n"

    def get_creation_time(self):
        # Lazily query and cache the creation time (seconds since epoch).
        if self.__creationTime == None:
            # NOTE(review): the `self.name]` list continuation appears
            # elided here, leaving the cmd list unterminated.
            cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "creation",
            outdata,errdata = util.run_command(cmd)
            self.__creationTime = long(outdata.rstrip())
        return self.__creationTime

    # NOTE(review): the enclosing `def exists(self):` header appears
    # elided from this excerpt.
        """
        Returns True if the dataset is still existent on the system.
        """
        # Test existance of the dataset by checking the output of a
        # simple zfs get command on the snapshot
        cmd = [ZFSCMD, "get", "-H", "-o", "name", "type", self.name]
        # NOTE(review): try/Popen-close/err-check scaffolding appears
        # elided here, as do the True/False returns after the comparison.
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
        outdata,errdata = p.communicate()
        except OSError, message:
        raise RuntimeError, "%s subprocess error:\n %s" % \
                            (command, str(message))
        # NOTE(review): `command` is undefined in this scope -- should
        # presumably be `cmd`; as shown this would raise NameError.
        result = outdata.rstrip()
        if result == self.name:

    def get_used_size(self):
        #print "fix get_used_size"
        # Lazily query and cache the "used" property (bytes, -p output).
        if self.__used_size == None:
            cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "used", self.name]
            outdata,errdata = util.run_command(cmd)
            self.__used_size = long(outdata.rstrip())
        return self.__used_size

    def get_user_property(self, prop, local=False):
        # NOTE(review): the `if local == True:`/`else:` branching around
        # these two alternative cmd forms appears elided from this excerpt.
        cmd = [ZFSCMD, "get", "-s", "local", "-H", "-o", "value", prop, self.name]
        cmd = [ZFSCMD, "get", "-H", "-o", "value", prop, self.name]
        outdata,errdata = util.run_command(cmd)
        return outdata.rstrip()

    def set_user_property(self, prop, value):
        # pfexec required: `zfs set` modifies dataset state.
        cmd = [PFCMD, ZFSCMD, "set", "%s=%s" % (prop, value), self.name]
        outdata,errdata = util.run_command(cmd)

    def unset_user_property(self, prop):
        # `zfs inherit` clears the local value so the parent's is used.
        cmd = [PFCMD, ZFSCMD, "inherit", prop, self.name]
        outdata,errdata = util.run_command(cmd)
class Snapshot(ReadableDataset):
    """
    ZFS Snapshot object class.
    Provides information and operations specific to ZFS snapshots
    """
    def __init__(self, name, creation = None):
        """
        Keyword arguments:
        name -- Name of the ZFS snapshot
        creation -- Creation time of the snapshot if known (Default None)
        """
        ReadableDataset.__init__(self, name, creation)
        self.fsname, self.snaplabel = self.__split_snapshot_name()
        self.poolname = self.__get_pool_name()

    def __get_pool_name(self):
        # Pool name is the first component of the filesystem name.
        # NOTE(review): `return name[0]` appears elided from this excerpt.
        name = self.fsname.split("/", 1)

    def __split_snapshot_name(self):
        name = self.name.split("@", 1)
        # Make sure this is really a snapshot and not a
        # filesystem otherwise a filesystem could get
        # destroyed instead of a snapshot. That would be
        # very bad.
        if name[0] == self.name:
            # NOTE(review): the `% (self.name))` argument line appears
            # elided here.
            raise SnapshotError("\'%s\' is not a valid snapshot name" \
        return name[0],name[1]

    def get_referenced_size(self):
        """
        How much unique storage space is used by this snapshot.
        """
        # NOTE(review): the `self.name]` list continuation appears elided
        # here, leaving the cmd list unterminated.
        cmd = [ZFSCMD, "get", "-H", "-p", \
              "-o", "value", "referenced", \
        outdata,errdata = util.run_command(cmd)
        return long(outdata.rstrip())

    def list_children(self):
        """Returns a recursive list of child snapshots of this snapshot"""
        # NOTE(review): the `cmd = [ZFSCMD,` opener, the `self.fsname]`
        # terminator, `result = []`, the loop body after the dangling
        # `and \` and the final return appear elided from this excerpt.
        "list", "-t", "snapshot", "-H", "-r", "-o", "name",
        outdata,errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            if re.search("@%s" % (self.snaplabel), line) and \

    def has_clones(self):
        """Returns True if the snapshot has any dependent clones"""
        cmd = [ZFSCMD, "list", "-H", "-o", "origin,name"]
        outdata,errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            details = line.rstrip().split()
            # NOTE(review): the rest of this condition and the
            # True/False returns appear elided from this excerpt.
            if details[0] == self.name and \

    def destroy_snapshot(self, deferred=True):
        """
        Permanently remove this snapshot from the filesystem
        Performs deferred destruction by default.
        """
        # NOTE(review): stray debug print -- consider removing.
        print "destroy_snapshot %s" % self.name
        # Be sure it genuninely exists before trying to destroy it
        # NOTE(review): the `return` under this guard and the `else:`
        # between the two cmd assignments appear elided from this excerpt.
        if self.exists() == False:
        if deferred == False:
            cmd = [PFCMD, ZFSCMD, "destroy", self.name]
            # -d marks the snapshot for deferred destruction.
            cmd = [PFCMD, ZFSCMD, "destroy", "-d", self.name]
        outdata,errdata = util.run_command(cmd)
        # Clear the global snapshot cache so that a rescan will be
        # triggered on the next call to Datasets.list_snapshots()
        self.datasets.refresh_snapshots()

    # NOTE(review): the enclosing `def hold(self, tag):` header and the
    # `return` under the existence guard appear elided from this excerpt.
        """
        Place a hold on the snapshot with the specified "tag" string.
        """
        # FIXME - fails if hold is already held
        # Be sure it genuninely exists before trying to place a hold
        if self.exists() == False:
        cmd = [PFCMD, ZFSCMD, "hold", tag, self.name]
        outdata,errdata = util.run_command(cmd)

    # NOTE(review): the enclosing `def holds(self):` header, the
    # `results = []` initialisation, the per-line split and the final
    # return appear elided from this excerpt.
        """
        Returns a list of user hold tags for this snapshot
        """
        cmd = [ZFSCMD, "holds", self.name]
        outdata,errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            # The first line heading columns are NAME TAG TIMESTAMP
            # Filter that line out.
            if (line[0] != "NAME" and line[1] != "TAG"):
                results.append(line[1])

    def release(self, tag,):
        """
        Release the hold on the snapshot with the specified "tag" string.
        """
        # NOTE(review): trailing comma in the parameter list above --
        # harmless but looks accidental.
        # FIXME raises exception if no hold exists.
        # Be sure it genuninely exists before trying to destroy it
        # NOTE(review): the `return` under this guard appears elided.
        if self.exists() == False:
        cmd = [PFCMD, ZFSCMD, "release", tag, self.name]
        outdata,errdata = util.run_command(cmd)
        # Releasing the snapshot might cause it get automatically
        # destroyed (eg. if it was pending deferred destruction).
        # Clear the global snapshot cache so that a rescan will be
        # triggered on the next call to Datasets.list_snapshots()
        self.datasets.refresh_snapshots()

    # NOTE(review): the enclosing `def __str__(self):` header and its
    # return appear elided from this excerpt.
    return_string = "Snapshot name: " + self.name
    return_string = return_string + "\n\tCreation time: " \
                    + str(self.get_creation_time())
    return_string = return_string + "\n\tUsed Size: " \
                    + str(self.get_used_size())
    return_string = return_string + "\n\tReferenced Size: " \
                    + str(self.get_referenced_size())
class ReadWritableDataset(ReadableDataset):
    """
    Base class for ZFS filesystems and volumes.
    Provides methods for operations and properties
    common to both filesystems and volumes.
    """
    def __init__(self, name, creation = None):
        ReadableDataset.__init__(self, name, creation)
        # Lazily-built per-dataset snapshot cache.
        self.__snapshots = None

    # NOTE(review): the enclosing `def __str__(self):` header and its
    # return appear elided from this excerpt.
    return_string = "ReadWritableDataset name: " + self.name + "\n"

    def get_auto_snap(self, schedule = None):
        # NOTE(review): the `if schedule:`/schedule-specific property name
        # branching appears elided -- as shown the two cmd assignments are
        # duplicates and `schedule` is unused; the True/False returns are
        # also missing.
        cmd = [ZFSCMD, "get", "-H", "-o", "value", \
              "com.sun:auto-snapshot", self.name]
        cmd = [ZFSCMD, "get", "-H", "-o", "value", \
              "com.sun:auto-snapshot", self.name]
        outdata,errdata = util.run_command(cmd)
        if outdata.rstrip() == "true":

    def get_available_size(self):
        # "available" property in bytes (-p gives raw parseable output).
        # NOTE(review): the `self.name]` list continuation appears elided
        # here, leaving the cmd list unterminated.
        cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "available", \
        outdata,errdata = util.run_command(cmd)
        return long(outdata.rstrip())

    def create_snapshot(self, snaplabel, recursive = False):
        """
        Create a snapshot for the ReadWritable dataset using the supplied
        snapshot label.

        Keyword arguments:
        snaplabel:
            A string to use as the snapshot label.
            The bit that comes after the "@" part of the snapshot
            name.
        recursive:
            Recursively snapshot childfren of this dataset.
        """
        cmd = [PFCMD, ZFSCMD, "snapshot"]
        # NOTE(review): `cmd.append("-r")` under this guard appears elided
        # from this excerpt.
        if recursive == True:
        cmd.append("%s@%s" % (self.name, snaplabel))
        outdata,errdata = util.run_command(cmd)
        # Invalidate the global snapshot cache.
        self.datasets.refresh_snapshots()

    def list_children(self):
        # Returns the list of child filesystem/volume names, excluding
        # this dataset itself.
        # Note, if more dataset types ever come around they will
        # need to be added to the filsystem,volume args below.
        # Not for the forseeable future though.
        cmd = [ZFSCMD, "list", "-H", "-r", "-t", "filesystem,volume",
               "-o", "name", self.name]
        outdata,errdata = util.run_command(cmd)
        # NOTE(review): `result = []` and the final return appear elided
        # from this excerpt.
        for line in outdata.rstrip().split('\n'):
            if line.rstrip() != self.name:
                result.append(line.rstrip())

    def list_snapshots(self, pattern = None):
        """
        List pattern matching snapshots sorted by creation date.

        Keyword arguments:
        pattern -- Filter according to pattern (default None)
        """
        # If there isn't a list of snapshots for this dataset
        # already, create it now and store it in order to save
        # time later for potential future invocations.
        Datasets.snapshotslock.acquire()
        # Invalidate the per-dataset cache if the global cache was flushed.
        if Datasets.snapshots == None:
            self.__snapshots = None
        Datasets.snapshotslock.release()
        if self.__snapshots == None:
            # NOTE(review): `result = []` appears elided here.
            regexpattern = "^%s@" % self.name
            patternobj = re.compile(regexpattern)
            for snapname,snaptime in self.datasets.list_snapshots():
                patternmatchobj = re.match(patternobj, snapname)
                if patternmatchobj != None:
                    result.append([snapname, snaptime])
            # Results already sorted by creation time
            self.__snapshots = result
        # NOTE(review): the `if pattern == None:`/`else:` branching,
        # `snapshots = []` and the final `return snapshots` appear elided.
        # Also note this appends bare snapname, unlike the [name, time]
        # pairs appended by the ZPool variant -- confirm intended.
            return self.__snapshots
        regexpattern = "^%s@.*%s" % (self.name, pattern)
        patternobj = re.compile(regexpattern)
        for snapname,snaptime in self.__snapshots:
            patternmatchobj = re.match(patternobj, snapname)
            if patternmatchobj != None:
                snapshots.append(snapname)

    def set_auto_snap(self, include, inherit = False):
        # NOTE(review): the `if inherit == True:`/`else:` branching and the
        # computation of `value` from `include` appear elided -- as shown
        # `value` is undefined.
        self.unset_user_property("com.sun:auto-snapshot")
        self.set_user_property("com.sun:auto-snapshot", value)
class Filesystem(ReadWritableDataset):
    """ZFS Filesystem class"""
    def __init__(self, name, mountpoint = None):
        ReadWritableDataset.__init__(self, name)
        # Cache the mountpoint if supplied; queried lazily otherwise.
        self.__mountpoint = mountpoint

    # NOTE(review): the enclosing `def __str__(self):` header and its
    # return appear elided from this excerpt.
    return_string = "Filesystem name: " + self.name + \
                    "\n\tMountpoint: " + self.get_mountpoint() + \
                    "\n\tMounted: " + str(self.is_mounted()) + \
                    "\n\tAuto snap: " + str(self.get_auto_snap())

    def get_mountpoint(self):
        # Lazily query and cache the mountpoint property.
        if (self.__mountpoint == None):
            # NOTE(review): the `self.name]` list continuation appears
            # elided here, leaving the cmd list unterminated.
            cmd = [ZFSCMD, "get", "-H", "-o", "value", "mountpoint", \
            outdata,errdata = util.run_command(cmd)
            result = outdata.rstrip()
            self.__mountpoint = result
        return self.__mountpoint

    def is_mounted(self):
        # NOTE(review): the `self.name]` list continuation and the
        # True/False returns after the comparison appear elided.
        cmd = [ZFSCMD, "get", "-H", "-o", "value", "mounted", \
        outdata,errdata = util.run_command(cmd)
        result = outdata.rstrip()

    def list_children(self):
        # Child filesystems only (narrower than the parent class version,
        # which also lists volumes).
        # NOTE(review): the `self.name]` list continuation, `result = []`
        # and the final return appear elided from this excerpt.
        cmd = [ZFSCMD, "list", "-H", "-r", "-t", "filesystem", "-o", "name",
        outdata,errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            if line.rstrip() != self.name:
                result.append(line.rstrip())
class Volume(ReadWritableDataset):
    """
    This is basically just a stub and does nothing
    unique from ReadWritableDataset parent class.
    """
    def __init__(self, name):
        ReadWritableDataset.__init__(self, name)

    # NOTE(review): the enclosing `def __str__(self):` header and its
    # return appear elided from this excerpt.
    return_string = "Volume name: " + self.name + "\n"
class ZFSError(Exception):
    """Generic base class for ZPoolFaultedError and SnapshotError

    Attributes:
        msg -- explanation of the error
    """
    def __init__(self, msg):
        # Store the message so __str__ (and callers) can retrieve it.
        # As visible in the excerpt, msg was never assigned although
        # repr(self.msg) is read, and the return sat directly under
        # __init__ -- both fixed here.
        self.msg = msg

    def __str__(self):
        return repr(self.msg)
class ZPoolFaultedError(ZFSError):
    """Exception raised for queries made against ZPools that
    are in a FAULTED state

    Attributes:
        msg -- explanation of the error
    """
    def __init__(self, msg):
        # Delegate message storage to the ZFSError base class.
        ZFSError.__init__(self, msg)
class SnapshotError(ZFSError):
    """Exception raised for invalid snapshot names provided to
    Snapshot() constructor.

    Attributes:
        msg -- explanation of the error
    """
    def __init__(self, msg):
        # Delegate message storage to the ZFSError base class.
        ZFSError.__init__(self, msg)
# NOTE(review): the enclosing `def list_zpools():` header, `result = []`
# and the final `return result` appear elided from this excerpt.
    """Returns a list of all zpools on the system"""
    cmd = [ZPOOLCMD, "list", "-H", "-o", "name"]
    outdata,errdata = util.run_command(cmd)
    for line in outdata.rstrip().split('\n'):
        result.append(line.rstrip())
if __name__ == "__main__":
    # Simple smoke test: walk every pool and print its datasets and
    # snapshots.
    for zpool in list_zpools():
        # NOTE(review): `pool = ZPool(zpool)` and the print statements for
        # the pool/filesystem/volume objects appear elided from this
        # excerpt -- `pool` is undefined as shown.
        for filesys,mountpoint in pool.list_filesystems():
            fs = Filesystem(filesys, mountpoint)
            print "\tSnapshots:"
            for snapshot, snaptime in fs.list_snapshots():
                snap = Snapshot(snapshot, snaptime)
                print "\t\t" + snap.name
        for volname in pool.list_volumes():
            vol = Volume(volname)
            print "\tSnapshots:"
            for snapshot, snaptime in vol.list_snapshots():
                snap = Snapshot(snapshot, snaptime)
                print "\t\t" + snap.name