Remove leftover backup files
author Ralf Ertzinger <ralf@skytale.net>
Wed, 12 Feb 2014 12:57:03 +0000 (13:57 +0100)
committer Ralf Ertzinger <ralf@skytale.net>
Wed, 12 Feb 2014 12:57:03 +0000 (13:57 +0100)
usr/share/time-slider/lib/plugin/pluginsmf.py~ [deleted file]
usr/share/time-slider/lib/time_slider/applet.py~ [deleted file]
usr/share/time-slider/lib/time_slider/deletegui.py~ [deleted file]
usr/share/time-slider/lib/time_slider/setupgui.py~ [deleted file]
usr/share/time-slider/lib/time_slider/snapnowui.py~ [deleted file]
usr/share/time-slider/lib/time_slider/timesliderd.py~ [deleted file]
usr/share/time-slider/lib/time_slider/tmp.py~ [deleted file]
usr/share/time-slider/lib/time_slider/tmp2.py~ [deleted file]
usr/share/time-slider/lib/time_slider/util.py~ [deleted file]
usr/share/time-slider/lib/time_slider/zfs.py~ [deleted file]
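
The deleted files are stray editor backups: Emacs and similar editors
leave a "name~" copy beside each file they edit, and these copies were
committed by accident. As a minimal sketch (a hypothetical helper, not
part of this repository), leftovers like these can be listed before
committing a cleanup such as this one:

    #!/usr/bin/python
    # list_editor_backups.py: print every '*~' editor backup file
    # found under a directory tree (illustration only).
    import os
    import sys

    def backup_files(root):
        # Walk the tree and yield paths whose names end in '~'.
        for dirpath, dirnames, filenames in os.walk(root):
            for name in filenames:
                if name.endswith('~'):
                    yield os.path.join(dirpath, name)

    if __name__ == '__main__':
        root = sys.argv[1] if len(sys.argv) > 1 else '.'
        for path in backup_files(root):
            print(path)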

diff --git a/usr/share/time-slider/lib/plugin/pluginsmf.py~ b/usr/share/time-slider/lib/plugin/pluginsmf.py~
deleted file mode 100644 (file)
index e6c44df..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/python2.6
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-import os
-import sys
-import subprocess
-from os.path import abspath, dirname, join, pardir
-sys.path.insert(0, join(dirname(__file__), pardir))
-print join(dirname(__file__), pardir)
-from time_slider import smf, autosnapsmf, util
-
-
-PLUGINBASEFMRI = "svc:/application/time-slider/plugin"
-PLUGINPROPGROUP = "plugin"
-
-class PluginSMF(smf.SMFInstance):
-
-    def __init__(self, instanceName):
-        smf.SMFInstance.__init__(self, instanceName)
-        self.triggerCommand = None
-        self.triggers = None
-
-    def get_trigger_command(self):
-        # FIXME Use mutex locking for MT safety
-        if self.triggerCommand == None:
-            value = self.get_prop(PLUGINPROPGROUP, "trigger_command")
-            self.triggerCommand = value.strip()
-        return self.triggerCommand            
-
-    def get_trigger_list(self):
-        #FIXME Use mutex locking to make MT-safe
-        if self.triggers == None:
-            self.triggers = []
-            value = self.get_prop(PLUGINPROPGROUP, "trigger_on")
-            
-            # Strip out '\' characters inserted by svcprop
-            triggerList = value.strip().replace('\\', '').split(',')
-            for trigger in triggerList:
-                self.triggers.append(trigger.strip())
-        return self.triggers
-
-    def get_verbose(self):
-        value = self.get_prop(PLUGINPROPGROUP, "verbose")
-        if value == "true":
-            return True
-        else:
-            return False
-
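
The two FIXME comments in get_trigger_command() and get_trigger_list()
note that the lazy caching is not thread-safe: two threads can race on
the "== None" check and both fetch the SMF property. A minimal sketch of
the locking the FIXMEs ask for, assuming one threading.Lock per instance
(names here are illustrative, not part of the module):

    import threading

    class LazyProp(object):
        # Compute a value once, under a lock, then serve it from cache.
        def __init__(self, compute):
            self._compute = compute          # callable producing the value
            self._lock = threading.Lock()
            self._value = None

        def get(self):
            with self._lock:                 # serialises the None check
                if self._value is None:
                    self._value = self._compute()
                return self._value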
diff --git a/usr/share/time-slider/lib/time_slider/applet.py~ b/usr/share/time-slider/lib/time_slider/applet.py~
deleted file mode 100755 (executable)
index 7d722a6..0000000
+++ /dev/null
@@ -1,645 +0,0 @@
-#!/usr/bin/python2.6
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-import sys
-import os
-import subprocess
-import threading
-import gobject
-import dbus
-import dbus.decorators
-import dbus.glib
-import dbus.mainloop
-import dbus.mainloop.glib
-import gio
-import gtk
-import pygtk
-import pynotify
-
-from time_slider import util, rbac
-
-from os.path import abspath, dirname, join, pardir
-sys.path.insert(0, join(dirname(__file__), pardir, "plugin"))
-import plugin
-sys.path.insert(0, join(dirname(__file__), pardir, "plugin", "rsync"))
-import backup, rsyncsmf
-
-class Note:
-    _iconConnected = False
-
-    def __init__(self, icon, menu):
-        self._note = None
-        self._msgDialog = None
-        self._menu = menu
-        self._icon = icon
-        if Note._iconConnected == False:
-            self._icon.connect("popup-menu", self._popup_menu)
-            Note._iconConnected = True
-        self._icon.set_visible(True)
-
-    def _popup_menu(self, icon, button, time):
-        if button == 3:
-            # Don't popup an empty menu
-            if len(self._menu.get_children()) > 0:
-                self._menu.popup(None, None,
-                                 gtk.status_icon_position_menu,
-                                 button, time, icon)
-
-    def _dialog_response(self, dialog, response):
-        dialog.destroy()
-
-    def _notification_closed(self, notification):
-        self._note = None
-        self._icon.set_blinking(False)
-
-    def _show_notification(self):
-        if self._icon.is_embedded() == True:
-            self._note.attach_to_status_icon(self._icon)
-        self._note.show()
-        return False
-
-    def _connect_to_object(self):
-        pass
-
-    def refresh(self):
-        pass
-
-    def _watch_handler(self, new_owner = None):
-        if new_owner == None or len(new_owner) == 0:
-            pass
-        else:
-            self._connect_to_object()
-
-    def _setup_icon_for_note(self, themed=None):
-        if themed:
-            iconList = themed.get_names()
-        else:
-            iconList = ['gnome-dev-harddisk']
-
-        iconTheme = gtk.icon_theme_get_default()
-        iconInfo = iconTheme.choose_icon(iconList, 48, 0)
-        pixbuf = iconInfo.load_icon()
-
-        self._note.set_category("device")
-        self._note.set_icon_from_pixbuf(pixbuf)
-
-
-class RsyncNote(Note):
-
-    def __init__(self, icon, menu):
-        Note.__init__(self, icon, menu)
-        dbus.bus.NameOwnerWatch(bus,
-                                "org.opensolaris.TimeSlider.plugin.rsync",
-                                self._watch_handler)
-
-        self.smfInst = rsyncsmf.RsyncSMF("%s:rsync" \
-                                         % (plugin.PLUGINBASEFMRI))
-        self._lock = threading.Lock()
-        self._masterKey = None
-        sys,self._nodeName,rel,ver,arch = os.uname()
-        # References to gio.File and handler_id of a registered
-        # monitor callback on gio.File
-        self._fm = None
-        self._fmID = None
-        # References to gio.VolumeMonitor and handler_ids of
-        # registered mount-added and mount-removed callbacks.
-        self._vm = None
-        self._vmAdd = None
-        self._vmRem = None
-        # Every time the rsync backup script runs it will
-        # register with d-bus and trigger self._watch_handler().
-        # Use this variable to keep track of its running status.
-        self._scriptRunning = False
-        self._targetDirAvail = False
-        self._syncNowItem = gtk.MenuItem(_("Update Backups Now"))
-        self._syncNowItem.set_sensitive(False)
-        self._syncNowItem.connect("activate",
-                                  self._sync_now)
-        self._menu.append(self._syncNowItem)
-
-        self.refresh()
-
-    def _validate_rsync_target(self, path):
-        """
-           Tests path to see if it is the pre-configured
-           rsync backup device path.
-           Returns True on success, otherwise False
-        """
-        if not os.path.exists(path):
-            return False
-        testDir = join(path,
-                       rsyncsmf.RSYNCDIRPREFIX,
-                       self._nodeName)
-        testKeyFile = join(path,
-                           rsyncsmf.RSYNCDIRPREFIX,
-                           rsyncsmf.RSYNCCONFIGFILE)
-        if os.path.exists(testDir) and \
-            os.path.exists(testKeyFile):
-            testKeyVal = None
-            f = open(testKeyFile, 'r')
-            for line in f.readlines():
-                key, val = line.strip().split('=')
-                if key.strip() == "target_key":
-                    targetKey = val.strip()
-                    break
-            f.close()
-            if targetKey == self._masterKey:
-                return True
-        return False
-
-    def _setup_monitor(self):
-        # Disconnect any previously registered signal
-        # handlers
-        if self._fm:
-            self._fm.disconnect(self._fmID)
-            self._fm = None
-
-        useVolMonitor = False        
-
-        # We always compare against masterKey to validate
-        # an rsync backup device.
-        self._masterKey = self.smfInst.get_target_key()
-        self._baseTargetDir = None
-        online = False
-
-        self._masterTargetDir = self.smfInst.get_target_dir()
-
-        if self._validate_rsync_target(self._masterTargetDir) == True:
-            self._baseTargetDir = self._masterTargetDir
-            online = True
-
-        if self._vm == None:
-            self._vm = gio.volume_monitor_get()
-
-        # If located, see if it's also managed by the volume monitor.
-        # Or just try to find it otherwise.
-        mounts = self._vm.get_mounts()
-        for mount in mounts:
-            root = mount.get_root()
-            path = root.get_path()
-            if self._baseTargetDir != None and \
-                path == self._baseTargetDir:
-                # Means the directory we found is gio monitored,
-                # so just monitor it using gio.VolumeMonitor.
-                useVolMonitor = True
-                break
-            elif self._validate_rsync_target(path) == True:
-                # Found it but not where we expected it to be so override
-                # the target path defined by SMF for now.
-                useVolMonitor = True
-                self._baseTargetDir = path
-                online = True
-                break
-
-        if self._baseTargetDir == None:
-            # Means we didn't find it, and we don't know where to expect
-            # it either - via a hotpluggable device or other nfs/zfs etc.
-            # We need to hedge our bets and monitor for both.
-            self._setup_file_monitor(self._masterTargetDir)
-            self._setup_volume_monitor()
-        else:
-            # Found it
-            if useVolMonitor == True:
-                # Looks like a removable device. Use gio.VolumeMonitor
-                # as the preferred monitoring mechanism.
-                self._setup_volume_monitor()
-            else:
-                # Found it on a static mount point like a zfs or nfs
-                # mount point.
-                # Can't use gio.VolumeMonitor so use a gio.File monitor
-                # instead.
-                self._setup_file_monitor(self._masterTargetDir)
-
-        # Finally, update the UI menu state
-        self._lock.acquire()
-        self._targetDirAvail = online
-        self._update_menu_state()
-        self._lock.release()
-            
-            
-    def _setup_file_monitor(self, expectedPath):
-        # Use gio.File monitor as a fallback in 
-        # case gio.VolumeMonitor can't track the device.
-        # This is the case for static/manual mount points
-        # such as NFS, ZFS and other non-hotpluggables.
-        gFile = gio.File(path=expectedPath)
-        self._fm = gFile.monitor_file(gio.FILE_MONITOR_WATCH_MOUNTS)
-        self._fmID = self._fm.connect("changed",
-                                      self._file_monitor_changed)
-
-    def _setup_volume_monitor(self):
-        # Check the handler_ids first to see if they have 
-        # already been connected. Avoids multiple callbacks
-        # for a single event
-        if self._vmAdd == None:
-            self._vmAdd = self._vm.connect("mount-added",
-                                           self._mount_added)
-        if self._vmRem == None:
-            self._vmRem = self._vm.connect("mount-removed",
-                                           self._mount_removed)
-            
-    def _mount_added(self, monitor, mount):
-        root = mount.get_root()
-        path = root.get_path()
-        if self._validate_rsync_target(path) == True:
-            # Since gio.VolumeMonitor found the rsync target, don't
-            # bother relying on gio.File to find it any more. Disconnect
-            # its registered callbacks.
-            if self._fm:
-                self._fm.disconnect(self._fmID)
-                self._fm = None
-            self._lock.acquire()
-            self._baseTargetDir = path
-            self._targetDirAvail = True
-            self._update_menu_state()
-            self._lock.release()
-
-    def _mount_removed(self, monitor, mount):
-        root = mount.get_root()
-        path = root.get_path()
-        if path == self._baseTargetDir:
-            self._lock.acquire()
-            self._targetDirAvail = False
-            self._update_menu_state()
-            self._lock.release()
-
-    def _file_monitor_changed(self, filemonitor, file, other_file, event_type):
-        if file.get_path() == self._masterTargetDir:
-            self._lock.acquire()
-            if self._validate_rsync_target(self._masterTargetDir) == True:
-                self._targetDirAvail = True
-            else:
-                self._targetDirAvail = False
-            self._update_menu_state()
-            self._lock.release()            
-
-    def _update_menu_state(self):
-        if self._syncNowItem:
-            if self._targetDirAvail == True and \
-                self._scriptRunning == False:
-                self._syncNowItem.set_sensitive(True)
-            else:
-                self._syncNowItem.set_sensitive(False)
-
-    def _watch_handler(self, new_owner = None):
-        self._lock.acquire()
-        if new_owner == None or len(new_owner) == 0:
-            # Script not running or exited
-            self._scriptRunning = False
-        else:
-            self._scriptRunning = True
-            self._connect_to_object()
-        self._update_menu_state()
-        self._lock.release()
-
-    def _rsync_started_handler(self, target, sender=None, interface=None, path=None):
-        urgency = pynotify.URGENCY_NORMAL
-        if (self._note != None):
-            self._note.close()
-        # Try to pretty things up a bit by displaying volume name
-        # and hinted icon instead of the raw device path,
-        # and standard harddisk icon if possible.
-        icon = None
-        volume = util.path_to_volume(target)
-        if volume == None:
-            volName = target
-        else:
-            volName = volume.get_name()
-            icon = volume.get_icon()
-                      
-        self._note = pynotify.Notification(_("Backup Started"),
-                                           _("Backing up snapshots to:\n<b>%s</b>\n" \
-                                           "Do not disconnect the backup device.") \
-                                            % (volName))
-        self._note.connect("closed", \
-                           self._notification_closed)
-        self._note.set_urgency(urgency)
-        self._setup_icon_for_note(icon)
-        gobject.idle_add(self._show_notification)
-
-    def _rsync_current_handler(self, snapshot, remaining, sender=None, interface=None, path=None):
-        self._icon.set_tooltip_markup(_("Backing up: <b>\'%s\'\n%d</b> snapshots remaining.\n" \
-                                      "Do not disconnect the backup device.") \
-                                      % (snapshot, remaining))
-
-    def _rsync_complete_handler(self, target, sender=None, interface=None, path=None):
-        urgency = pynotify.URGENCY_NORMAL
-        if (self._note != None):
-            self._note.close()
-        # Try to pretty things up a bit by displaying volume name
-        # and hinted icon instead of the raw device path,
-        # and standard harddisk icon if possible.
-        icon = None
-        volume = util.path_to_volume(target)
-        if volume == None:
-            volName = target
-        else:
-            volName = volume.get_name()
-            icon = volume.get_icon()
-
-        self._note = pynotify.Notification(_("Backup Complete"),
-                                           _("Your snapshots have been backed up to:\n<b>%s</b>") \
-                                           % (volName))
-        self._note.connect("closed", \
-                           self._notification_closed)
-        self._note.set_urgency(urgency)
-        self._setup_icon_for_note(icon)
-        self._icon.set_has_tooltip(False)
-        self.queueSize = 0
-        gobject.idle_add(self._show_notification)
-
-    def _rsync_synced_handler(self, sender=None, interface=None, path=None):
-        self._icon.set_tooltip_markup(_("Your backups are up to date."))
-        self.queueSize = 0
-
-    def _rsync_unsynced_handler(self, queueSize, sender=None, interface=None, path=None):
-        self._icon.set_tooltip_markup(_("%d snapshots are queued for backup.") \
-                                      % (queueSize))
-        self.queueSize = queueSize
-
-    def _connect_to_object(self):
-        try:
-            remote_object = bus.get_object("org.opensolaris.TimeSlider.plugin.rsync",
-                                           "/org/opensolaris/TimeSlider/plugin/rsync")
-        except dbus.DBusException:
-            sys.stderr.write("Failed to connect to remote D-Bus object: " + \
-                             "/org/opensolaris/TimeSlider/plugin/rsync")
-            return
-
-        # Create an Interface wrapper for the remote object
-        iface = dbus.Interface(remote_object, "org.opensolaris.TimeSlider.plugin.rsync")
-
-        iface.connect_to_signal("rsync_started", self._rsync_started_handler, sender_keyword='sender',
-                                interface_keyword='interface', path_keyword='path')
-        iface.connect_to_signal("rsync_current", self._rsync_current_handler, sender_keyword='sender',
-                                interface_keyword='interface', path_keyword='path')
-        iface.connect_to_signal("rsync_complete", self._rsync_complete_handler, sender_keyword='sender',
-                                interface_keyword='interface', path_keyword='path')
-        iface.connect_to_signal("rsync_synced", self._rsync_synced_handler, sender_keyword='sender',
-                                interface_keyword='interface', path_keyword='path')
-        iface.connect_to_signal("rsync_unsynced", self._rsync_unsynced_handler, sender_keyword='sender',
-                                interface_keyword='interface', path_keyword='path')
-
-    def refresh(self):
-        # Hide/Unhide rsync menu item based on whether the plugin is online
-        if self._syncNowItem and \
-           self.smfInst.get_service_state() == "online":
-            #self._setup_file_monitor()
-            self._setup_monitor()
-            # Kick start things by initially obtaining the
-            # backlog size and triggering a callback.
-            # Signal handlers will keep tooltip status up
-            # to date afterwards when the backup cron job
-            # executes.
-            propName = "%s:rsync" % (backup.propbasename)
-            queue = backup.list_pending_snapshots(propName)
-            self.queueSize = len(queue)
-            if self.queueSize == 0:
-                self._rsync_synced_handler()
-            else:
-                self._rsync_unsynced_handler(self.queueSize)
-            self._syncNowItem.show()
-        else:
-            self._syncNowItem.hide()
-
-    def _sync_now(self, menuItem):
-        """Runs the rsync-backup script manually
-           Assumes that user is root since it is only
-           called from the menu item which is invisible to
-           not authorised users
-        """
-        cmdPath = os.path.join(os.path.dirname(sys.argv[0]), \
-                               "time-slider/plugins/rsync/rsync-backup")
-        if os.geteuid() == 0:
-            cmd = [cmdPath, \
-                   "%s:rsync" % (plugin.PLUGINBASEFMRI)]
-        else:
-            cmd = ['/usr/bin/gksu', cmdPath, \
-                   "%s:rsync" % (plugin.PLUGINBASEFMRI)]
-
-        subprocess.Popen(cmd, close_fds=True, cwd="/")
-
-
-class CleanupNote(Note):
-
-    def __init__(self, icon, menu):
-        Note.__init__(self, icon, menu)
-        self._cleanupHead = None
-        self._cleanupBody = None
-        dbus.bus.NameOwnerWatch(bus,
-                                "org.opensolaris.TimeSlider",
-                                self._watch_handler)
-
-    def _show_cleanup_details(self, *args):
-        # We could keep a dialog around, but this is a rare
-        # enough event that it's not worth the effort.
-        dialog = gtk.MessageDialog(type=gtk.MESSAGE_WARNING,
-                                   buttons=gtk.BUTTONS_CLOSE)
-        dialog.set_title(_("Time Slider: Low Space Warning"))
-        dialog.set_markup("<b>%s</b>" % (self._cleanupHead))
-        dialog.format_secondary_markup(self._cleanupBody)
-        dialog.show()
-        dialog.present()
-        dialog.connect("response", self._dialog_response)
-
-    def _cleanup_handler(self, pool, severity, threshhold, sender=None, interface=None, path=None):
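-        # Map the reported severity (4 = worst, 1 = mildest) onto a
-        # notification urgency, expiry and message text.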
-        if severity == 4:
-            expiry = pynotify.EXPIRES_NEVER
-            urgency = pynotify.URGENCY_CRITICAL
-            self._cleanupHead = _("Emergency: \'%s\' is full!") % pool
-            notifyBody = _("The file system: \'%s\', is over %s%% full.") \
-                            % (pool, threshhold)
-            self._cleanupBody = _("The file system: \'%s\', is over %s%% full.\n"
-                     "As an emergency measure, Time Slider has "
-                     "destroyed all of its backups.\nTo fix this problem, "
-                     "delete any unnecessary files on \'%s\', or add "
-                     "disk space (see ZFS documentation).") \
-                      % (pool, threshhold, pool)
-        elif severity == 3:
-            expiry = pynotify.EXPIRES_NEVER
-            urgency = pynotify.URGENCY_CRITICAL
-            self._cleanupHead = _("Emergency: \'%s\' is almost full!") % pool
-            notifyBody = _("The file system: \'%s\', exceeded %s%% "
-                           "of its total capacity") \
-                            % (pool, threshhold)
-            self._cleanupBody = _("The file system: \'%s\', exceeded %s%% "
-                     "of its total capacity. As an emerency measure, "
-                     "Time Slider has has destroyed most or all of its "
-                     "backups to prevent the disk becoming full. "
-                     "To prevent this from happening again, delete "
-                     "any unnecessary files on \'%s\', or add disk "
-                     "space (see ZFS documentation).") \
-                      % (pool, threshhold, pool)
-        elif severity == 2:
-            expiry = pynotify.EXPIRES_NEVER
-            urgency = pynotify.URGENCY_CRITICAL
-            self._cleanupHead = _("Urgent: \'%s\' is almost full!") % pool
-            notifyBody = _("The file system: \'%s\', exceeded %s%% "
-                           "of its total capacity") \
-                            % (pool, threshhold)
-            self._cleanupBody = _("The file system: \'%s\', exceeded %s%% "
-                     "of its total capacity. As a remedial measure, "
-                     "Time Slider has destroyed some backups, and will "
-                     "destroy more, eventually all, as capacity continues "
-                     "to diminish.\nTo prevent this from happening again, "
-                     "delete any unnecessary files on \'%s\', or add disk "
-                     "space (see ZFS documentation).") \
-                     % (pool, threshhold, pool)
-        elif severity == 1:
-            expiry = 20000 # 20 seconds
-            urgency = pynotify.URGENCY_NORMAL
-            self._cleanupHead = _("Warning: \'%s\' is getting full") % pool
-            notifyBody = _("The file system: \'%s\', exceeded %s%% "
-                           "of its total capacity") \
-                            % (pool, threshhold)
-            self._cleanupBody = _("\'%s\' exceeded %s%% of its total "
-                     "capacity. To fix this, Time Slider has destroyed "
-                     "some recent backups, and will destroy more as "
-                     "capacity continues to diminish.\nTo prevent "
-                     "this from happening again, delete any "
-                     "unnecessary files on \'%s\', or add disk space "
-                     "(see ZFS documentation).\n") \
-                     % (pool, threshhold, pool)
-        else:
-            return # No other values currently supported
-
-        if (self._note != None):
-            self._note.close()
-        self._note = pynotify.Notification(self._cleanupHead,
-                                           notifyBody)
-        self._note.add_action("clicked",
-                              _("Details..."),
-                              self._show_cleanup_details)
-        self._note.connect("closed",
-                           self._notification_closed)
-        self._note.set_urgency(urgency)
-        self._note.set_timeout(expiry)
-        self._setup_icon_for_note()
-        self._icon.set_blinking(True)
-        gobject.idle_add(self._show_notification)
-
-    def _connect_to_object(self):
-        try:
-            remote_object = bus.get_object("org.opensolaris.TimeSlider",
-                                           "/org/opensolaris/TimeSlider/autosnap")
-        except dbus.DBusException:
-            sys.stderr.write("Failed to connect to remote D-Bus object: " + \
-                             "/org/opensolaris/TimeSlider/autosnap")
-
-        #Create an Interface wrapper for the remote object
-        iface = dbus.Interface(remote_object, "org.opensolaris.TimeSlider.autosnap")
-
-        iface.connect_to_signal("capacity_exceeded", self._cleanup_handler, sender_keyword='sender',
-                                interface_keyword='interface', path_keyword='path')
-
-
-
-class SetupNote(Note):
-
-    def __init__(self, icon, menu, manager):
-        Note.__init__(self, icon, menu)
-        # We are passed a reference to our parent so we can
-        # provide it with notifications, which it can then circulate
-        # to other notification objects such as Rsync and
-        # Cleanup
-        self._manager = manager
-        self._icon = icon
-        self._menu = menu
-        self._configSvcItem = gtk.MenuItem(_("Configure Time Slider..."))
-        self._configSvcItem.connect("activate",
-                                    self._run_config_app)
-        self._configSvcItem.set_sensitive(True)
-        self._menu.append(self._configSvcItem)
-        self._configSvcItem.show()
-        dbus.bus.NameOwnerWatch(bus,
-                                "org.opensolaris.TimeSlider.config",
-                                self._watch_handler)
-
-    def _connect_to_object(self):
-        try:
-            remote_object = bus.get_object("org.opensolaris.TimeSlider.config",
-                                           "/org/opensolaris/TimeSlider/config")
-        except dbus.DBusException:
-            sys.stderr.write("Failed to connect to remote D-Bus object: " + \
-                             "/org/opensolaris/TimeSlider/config")
-
-        #Create an Interface wrapper for the remote object
-        iface = dbus.Interface(remote_object, "org.opensolaris.TimeSlider.config")
-
-        iface.connect_to_signal("config_changed", self._config_handler, sender_keyword='sender',
-                                interface_keyword='interface', path_keyword='path')
-
-    def _config_handler(self, sender=None, interface=None, path=None):
-        # Notify the manager.
-        # This will eventually propagate through to an invocation
-        # of our own refresh() method.
-        self._manager.refresh()
-
-    def _run_config_app(self, menuItem):
-        cmdPath = os.path.join(os.path.dirname(sys.argv[0]),
-                           os.path.pardir,
-                           "bin",
-                           "time-slider-setup")
-        cmd = os.path.abspath(cmdPath)
-        # The setup GUI deals with its own security and
-        # authorisation, so no need to pfexec it. Any
-        # changes made to configuration will come back to
-        # us by way of D-Bus notification.
-        subprocess.Popen(cmd, close_fds=True)
-
-class NoteManager():
-    def __init__(self):
-        # Notification objects need to share a common
-        # status icon and popup menu so these are created
-        # outside the object and passed to the constructor
-        self._menu = gtk.Menu()
-        self._icon = gtk.StatusIcon()
-        self._icon.set_from_icon_name("time-slider-setup")
-        self._setupNote = SetupNote(self._icon,
-                                    self._menu,
-                                    self)
-        self._cleanupNote = CleanupNote(self._icon,
-                                        self._menu)
-        self._rsyncNote = RsyncNote(self._icon,
-                                    self._menu)
-
-    def refresh(self):
-        self._rsyncNote.refresh()
-
-bus = dbus.SystemBus()
-
-def main(argv):
-    mainloop = gobject.MainLoop()
-    dbus.mainloop.glib.DBusGMainLoop(set_as_default = True)
-    gobject.threads_init()
-    pynotify.init(_("Time Slider"))
-
-    noteManager = NoteManager()
-
-    try:
-        mainloop.run()
-    except:
-        print "Exiting"
-
-if __name__ == '__main__':
-    main(sys.argv)
-
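
The _connect_to_object() methods above show the D-Bus wiring the applet
uses throughout: look up the remote object, wrap it in an Interface, and
register signal handlers. A pared-down sketch of the same pattern, reusing
the service names from this file (the listener function and handler are
illustrative):

    import sys
    import dbus
    import dbus.mainloop.glib
    import gobject

    def watch_rsync_plugin(handler):
        # Deliver the plugin's "rsync_started" signal to handler.
        dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
        bus = dbus.SystemBus()
        obj = bus.get_object("org.opensolaris.TimeSlider.plugin.rsync",
                             "/org/opensolaris/TimeSlider/plugin/rsync")
        iface = dbus.Interface(obj,
                               "org.opensolaris.TimeSlider.plugin.rsync")
        iface.connect_to_signal("rsync_started", handler)

    if __name__ == '__main__':
        watch_rsync_plugin(
            lambda target: sys.stdout.write("backup started: %s\n" % target))
        gobject.MainLoop().run()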
diff --git a/usr/share/time-slider/lib/time_slider/deletegui.py~ b/usr/share/time-slider/lib/time_slider/deletegui.py~
deleted file mode 100755 (executable)
index 8023d9a..0000000
+++ /dev/null
@@ -1,756 +0,0 @@
-#!/usr/bin/python2.6
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-import threading
-import sys
-import os
-import time
-import getopt
-import locale
-import shutil
-import fcntl
-from bisect import insort
-
-try:
-    import pygtk
-    pygtk.require("2.4")
-except:
-    pass
-try:
-    import gtk
-    import gtk.glade
-    gtk.gdk.threads_init()
-except:
-    sys.exit(1)
-try:
-    import glib
-    import gobject
-except:
-    sys.exit(1)
-
-from os.path import abspath, dirname, join, pardir
-sys.path.insert(0, join(dirname(__file__), pardir, "plugin"))
-import plugin
-sys.path.insert(0, join(dirname(__file__), pardir, "plugin", "rsync"))
-import rsyncsmf
-
-
-# Here we define the path constants so that other modules can use them.
-# This gives access to the shared files without having to know their
-# actual location; we just use the location of the current file and
-# paths relative to that.
-SHARED_FILES = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                               os.path.pardir,
-                               os.path.pardir))
-LOCALE_PATH = os.path.join('/usr', 'share', 'locale')
-RESOURCE_PATH = os.path.join(SHARED_FILES, 'res')
-
-# The name of the gettext domain. Because our translation files are
-# not in a global folder this doesn't really matter; setting it to the
-# application name is a good idea though.
-GETTEXT_DOMAIN = 'time-slider'
-
-# set up the glade gettext system and locales
-gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
-gtk.glade.textdomain(GETTEXT_DOMAIN)
-
-import zfs
-from rbac import RBACprofile
-
-class RsyncBackup:
-
-    def __init__(self, mountpoint, rsync_dir=None, fsname=None, snaplabel=None, creationtime=None):
-
-        if rsync_dir == None:
-            self.__init_from_mp (mountpoint)
-        else:
-            self.rsync_dir = rsync_dir
-            self.mountpoint = mountpoint
-            self.fsname = fsname
-            self.snaplabel = snaplabel
-
-            self.creationtime = creationtime
-            try:
-                tm = time.localtime(self.creationtime)
-                self.creationtime_str = unicode(time.strftime ("%c", tm),
-                           locale.getpreferredencoding()).encode('utf-8')
-            except:
-                self.creationtime_str = time.ctime(self.creationtime)
-        fs = zfs.Filesystem (self.fsname)
-        self.zfs_mountpoint = fs.get_mountpoint ()
-
-    def __init_from_mp (self, mountpoint):
-        self.rsyncsmf = rsyncsmf.RsyncSMF("%s:rsync" %(plugin.PLUGINBASEFMRI))
-        rsyncBaseDir = self.rsyncsmf.get_target_dir()
-        sys,nodeName,rel,ver,arch = os.uname()
-        self.rsync_dir = os.path.join(rsyncBaseDir,
-                                     rsyncsmf.RSYNCDIRPREFIX,
-                                     nodeName)
-        self.mountpoint = mountpoint
-
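-        # The mountpoint has the form
-        #   <rsync_dir>/<fsname>/<RSYNCDIRSUFFIX>/<snaplabel>
-        # so recover fsname and snaplabel by splitting on the known
-        # path components.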
-        s1 = mountpoint.split ("%s/" % self.rsync_dir, 1)
-        s2 = s1[1].split ("/%s" % rsyncsmf.RSYNCDIRSUFFIX, 1)
-        s3 = s2[1].split ('/',2)
-        self.fsname = s2[0]
-        self.snaplabel =  s3[1]
-        self.creationtime = os.stat(mountpoint).st_mtime
-
-    def __str__(self):
-        ret = "self.rsync_dir = %s\n \
-               self.mountpoint = %s\n \
-               self.fsname = %s\n \
-               self.snaplabel = %s\n" % (self.rsync_dir,
-                                         self.mountpoint, self.fsname,
-                                         self.snaplabel)
-        return ret
-
-
-    def exists(self):
-        return os.path.exists(self.mountpoint)
-
-    def destroy(self):
-        lockFileDir = os.path.join(self.rsync_dir,
-                             self.fsname,
-                             rsyncsmf.RSYNCLOCKSUFFIX)
-
-        if not os.path.exists(lockFileDir):
-            os.makedirs(lockFileDir, 0755)
-
-        lockFile = os.path.join(lockFileDir, self.snaplabel + ".lock")
-        try:
-            lockFp = open(lockFile, 'w')
-            fcntl.flock(lockFp, fcntl.LOCK_EX | fcntl.LOCK_NB)
-        except IOError:
-            raise RuntimeError, \
-            "couldn't delete %s, already used by another process" % self.mountpoint
-
-        trashDir = os.path.join(self.rsync_dir,
-                          self.fsname,
-                          rsyncsmf.RSYNCTRASHSUFFIX)
-        if not os.path.exists(trashDir):
-            os.makedirs(trashDir, 0755)
-
-        backupTrashDir = os.path.join (self.rsync_dir,
-                                 self.fsname,
-                                 rsyncsmf.RSYNCTRASHSUFFIX,
-                                 self.snaplabel)
-
-        # move then delete
-        os.rename (self.mountpoint, backupTrashDir)
-        shutil.rmtree (backupTrashDir)
-
-        log = "%s/%s/%s/%s.log" % (self.rsync_dir,
-                                   self.fsname,
-                                   rsyncsmf.RSYNCLOGSUFFIX,
-                                   self.snaplabel)
-        if os.path.exists (log):
-            os.unlink (log)
-
-        lockFp.close()
-        os.unlink(lockFile)
-
-class DeleteSnapManager:
-
-    def __init__(self, snapshots = None):
-        self.xml = gtk.glade.XML("%s/../../glade/time-slider-delete.glade" \
-                                  % (os.path.dirname(__file__)))
-        self.backuptodelete = []
-        self.shortcircuit = []
-        maindialog = self.xml.get_widget("time-slider-delete")
-        self.pulsedialog = self.xml.get_widget("pulsedialog")
-        self.pulsedialog.set_transient_for(maindialog)
-        self.datasets = zfs.Datasets()
-        if snapshots:
-            maindialog.hide()
-            self.shortcircuit = snapshots
-        else:
-            glib.idle_add(self.__init_scan)
-
-        self.progressdialog = self.xml.get_widget("deletingdialog")
-        self.progressdialog.set_transient_for(maindialog)
-        self.progressbar = self.xml.get_widget("deletingprogress")
-        # signal dictionary
-        dic = {"on_closebutton_clicked" : gtk.main_quit,
-               "on_window_delete_event" : gtk.main_quit,
-               "on_snapshotmanager_delete_event" : gtk.main_quit,
-               "on_fsfilterentry_changed" : self.__on_filterentry_changed,
-               "on_schedfilterentry_changed" : self.__on_filterentry_changed,
-               "on_typefiltercombo_changed" : self.__on_filterentry_changed,
-               "on_selectbutton_clicked" : self.__on_selectbutton_clicked,
-               "on_deselectbutton_clicked" : self.__on_deselectbutton_clicked,
-               "on_deletebutton_clicked" : self.__on_deletebutton_clicked,
-               "on_confirmcancel_clicked" : self.__on_confirmcancel_clicked,
-               "on_confirmdelete_clicked" : self.__on_confirmdelete_clicked,
-               "on_errordialog_response" : self.__on_errordialog_response}
-        self.xml.signal_autoconnect(dic)
-
-    def initialise_view(self):
-        if len(self.shortcircuit) == 0:
-            # Set TreeViews
-            self.liststorefs = gtk.ListStore(str, str, str, str, str, long,
-                                             gobject.TYPE_PYOBJECT)
-            list_filter = self.liststorefs.filter_new()
-            list_sort = gtk.TreeModelSort(list_filter)
-            list_sort.set_sort_column_id(1, gtk.SORT_ASCENDING)
-
-            self.snaptreeview = self.xml.get_widget("snaplist")
-            self.snaptreeview.set_model(self.liststorefs)
-            self.snaptreeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
-
-            cell0 = gtk.CellRendererText()
-            cell1 = gtk.CellRendererText()
-            cell2 = gtk.CellRendererText()
-            cell3 = gtk.CellRendererText()
-            cell4 = gtk.CellRendererText()
-            cell5 = gtk.CellRendererText()
-
-            typecol = gtk.TreeViewColumn(_("Type"),
-                                            cell0, text = 0)
-            typecol.set_sort_column_id(0)
-            typecol.set_resizable(True)
-            typecol.connect("clicked",
-                self.__on_treeviewcol_clicked, 0)
-            self.snaptreeview.append_column(typecol)
-
-            mountptcol = gtk.TreeViewColumn(_("Mount Point"),
-                                            cell1, text = 1)
-            mountptcol.set_sort_column_id(1)
-            mountptcol.set_resizable(True)
-            mountptcol.connect("clicked",
-                self.__on_treeviewcol_clicked, 1)
-            self.snaptreeview.append_column(mountptcol)
-
-            fsnamecol = gtk.TreeViewColumn(_("File System Name"),
-                                           cell2, text = 2)
-            fsnamecol.set_sort_column_id(2)
-            fsnamecol.set_resizable(True)
-            fsnamecol.connect("clicked",
-                self.__on_treeviewcol_clicked, 2)
-            self.snaptreeview.append_column(fsnamecol)
-
-            snaplabelcol = gtk.TreeViewColumn(_("Snapshot Name"),
-                                              cell3, text = 3)
-            snaplabelcol.set_sort_column_id(3)
-            snaplabelcol.set_resizable(True)
-            snaplabelcol.connect("clicked",
-                self.__on_treeviewcol_clicked, 3)
-            self.snaptreeview.append_column(snaplabelcol)
-
-            cell4.props.xalign = 1.0
-            creationcol = gtk.TreeViewColumn(_("Creation Time"),
-                                             cell4, text = 4)
-            creationcol.set_sort_column_id(5)
-            creationcol.set_resizable(True)
-            creationcol.connect("clicked",
-                self.__on_treeviewcol_clicked, 5)
-            self.snaptreeview.append_column(creationcol)
-
-            # Note to developers.
-            # The second element is for internal matching and should not
-            # be i18ned under any circumstances.
-            typestore = gtk.ListStore(str, str)
-            typestore.append([_("All"), "All"])
-            typestore.append([_("Backups"), "Backup"])
-            typestore.append([_("Snapshots"), "Snapshot"])
-
-            self.typefiltercombo = self.xml.get_widget("typefiltercombo")
-            self.typefiltercombo.set_model(typestore)
-            typefiltercomboCell = gtk.CellRendererText()
-            self.typefiltercombo.pack_start(typefiltercomboCell, True)
-            self.typefiltercombo.add_attribute(typefiltercomboCell, 'text',0)
-
-            # Note to developers.
-            # The second element is for internal matching and should not
-            # be i18ned under any circumstances.
-            fsstore = gtk.ListStore(str, str)
-            fslist = self.datasets.list_filesystems()
-            fsstore.append([_("All"), None])
-            for fsname,fsmount in fslist:
-                fsstore.append([fsname, fsname])
-            self.fsfilterentry = self.xml.get_widget("fsfilterentry")
-            self.fsfilterentry.set_model(fsstore)
-            self.fsfilterentry.set_text_column(0)
-            fsfilterentryCell = gtk.CellRendererText()
-            self.fsfilterentry.pack_start(fsfilterentryCell)
-
-            schedstore = gtk.ListStore(str, str)
-            # Note to developers.
-            # The second element is for internal matching and should not
-            # be i18ned under any circumstances.
-            schedstore.append([_("All"), None])
-            schedstore.append([_("Monthly"), "monthly"])
-            schedstore.append([_("Weekly"), "weekly"])
-            schedstore.append([_("Daily"), "daily"])
-            schedstore.append([_("Hourly"), "hourly"])
-            schedstore.append([_("1/4 Hourly"), "frequent"])
-            self.schedfilterentry = self.xml.get_widget("schedfilterentry")
-            self.schedfilterentry.set_model(schedstore)
-            self.schedfilterentry.set_text_column(0)
-            schedentryCell = gtk.CellRendererText()
-            self.schedfilterentry.pack_start(schedentryCell)
-
-            self.schedfilterentry.set_active(0)
-            self.fsfilterentry.set_active(0)
-            self.typefiltercombo.set_active(0)
-        else:
-            cloned = self.datasets.list_cloned_snapshots()
-            num_snap = 0
-            num_rsync = 0
-            for snapname in self.shortcircuit:
-                # Filter out snapshots that are the root
-                # of cloned filesystems or volumes
-                try:
-                    cloned.index(snapname)
-                    dialog = gtk.MessageDialog(None,
-                                   0,
-                                   gtk.MESSAGE_ERROR,
-                                   gtk.BUTTONS_CLOSE,
-                                   _("Snapshot can not be deleted"))
-                    text = _("%s has one or more dependent clones "
-                             "and will not be deleted. To delete "
-                             "this snapshot, first delete all "
-                             "datasets and snapshots cloned from "
-                             "this snapshot.") \
-                             % snapname
-                    dialog.format_secondary_text(text)
-                    dialog.run()
-                    sys.exit(1)
-                except ValueError:
-                    path = os.path.abspath (snapname)
-                    if not os.path.exists (path):
-                        snapshot = zfs.Snapshot(snapname)
-                        self.backuptodelete.append(snapshot)
-                        num_snap += 1
-                    else:
-                        self.backuptodelete.append(RsyncBackup (snapname))
-                        num_rsync += 1
-
-            confirm = self.xml.get_widget("confirmdialog")
-            summary = self.xml.get_widget("summarylabel")
-            total = len(self.backuptodelete)
-
-            text = ""
-            if num_rsync != 0 :
-                if num_rsync == 1:
-                    text = _("1 external backup will be deleted.")
-                else:
-                    text = _("%d external backups will be deleted.") % num_rsync
-
-            if num_snap != 0 :
-                if len(text) != 0:
-                    text += "\n"
-                if num_snap == 1:
-                    text += _("1 snapshot will be deleted.")
-                else:
-                    text += _("%d snapshots will be deleted.") % num_snap
-
-            summary.set_text(text)
-            response = confirm.run()
-            if response != 2:
-                sys.exit(0)
-            else:
-                # Create the thread in an idle loop in order to
-                # avoid deadlock inside gtk.
-                glib.idle_add(self.__init_delete)
-        return False
-
-    def __on_treeviewcol_clicked(self, widget, searchcol):
-        self.snaptreeview.set_search_column(searchcol)
-
-    def __filter_snapshot_list(self, list, filesys = None, snap = None, btype = None):
-        if filesys == None and snap == None and btype == None:
-            return list
-        fssublist = []
-        if filesys != None:
-            for snapshot in list:
-                if snapshot.fsname.find(filesys) != -1:
-                    fssublist.append(snapshot)
-        else:
-            fssublist = list
-
-        snaplist = []
-        if snap != None:
-            for snapshot in fssublist:
-                if  snapshot.snaplabel.find(snap) != -1:
-                    snaplist.append(snapshot)
-        else:
-            snaplist = fssublist
-
-        typelist = []
-        if btype != None and btype != "All":
-            for item in snaplist:
-                if btype == "Backup":
-                    if isinstance(item, RsyncBackup):
-                        typelist.append (item)
-                else:
-                    if isinstance(item, zfs.Snapshot):
-                        typelist.append (item)
-        else:
-            typelist = snaplist
-
-        return typelist
-
-    def __on_filterentry_changed(self, widget):
-        # Get the filesystem filter value
-        iter = self.fsfilterentry.get_active_iter()
-        if iter == None:
-            filesys = self.fsfilterentry.get_active_text()
-        else:
-            model = self.fsfilterentry.get_model()
-            filesys = model.get(iter, 1)[0]
-        # Get the snapshot name filter value
-        iter = self.schedfilterentry.get_active_iter()
-        if iter == None:
-            snap = self.schedfilterentry.get_active_text()
-        else:
-            model = self.schedfilterentry.get_model()
-            snap = model.get(iter, 1)[0]
-
-        # Get the type filter value
-        iter = self.typefiltercombo.get_active_iter()
-        if iter == None:
-            type = "All"
-        else:
-            model = self.typefiltercombo.get_model()
-            type = model.get(iter, 1)[0]
-
-        self.liststorefs.clear()
-        newlist = self.__filter_snapshot_list(self.snapscanner.snapshots,
-                    filesys,
-                    snap, type)
-        for snapshot in newlist:
-            try:
-                tm = time.localtime(snapshot.get_creation_time())
-                t = unicode(time.strftime ("%c", tm),
-                    locale.getpreferredencoding()).encode('utf-8')
-            except:
-                t = time.ctime(snapshot.get_creation_time())
-            try:
-                mount_point = self.snapscanner.mounts[snapshot.fsname]
-                if (mount_point == "legacy"):
-                    mount_point = _("Legacy")
-
-                self.liststorefs.append([
-                       _("Snapshot"),
-                       mount_point,
-                       snapshot.fsname,
-                       snapshot.snaplabel,
-                       t,
-                       snapshot.get_creation_time(),
-                       snapshot])
-            except KeyError:
-                # This catches exceptions from things we ignore,
-                # such as dump and swap volumes, and skips over them.
-                continue
-            # add rsync backups
-        newlist = self.__filter_snapshot_list(self.snapscanner.rsynced_backups,
-                                                filesys,
-                                                snap, type)
-        for backup in newlist:
-            self.liststorefs.append([_("Backup"),
-                                     backup.zfs_mountpoint,
-                                     backup.fsname,
-                                     backup.snaplabel,
-                                     backup.creationtime_str,
-                                     backup.creationtime,
-                                     backup])
-
-    def __on_selectbutton_clicked(self, widget):
-        selection = self.snaptreeview.get_selection()
-        selection.select_all()
-        return
-
-    def __on_deselectbutton_clicked(self, widget):
-        selection = self.snaptreeview.get_selection()
-        selection.unselect_all()
-        return
-
-    def __on_deletebutton_clicked(self, widget):
-        self.backuptodelete = []
-        selection = self.snaptreeview.get_selection()
-        selection.selected_foreach(self.__add_selection)
-        total = len(self.backuptodelete)
-        if total <= 0:
-            return
-
-        confirm = self.xml.get_widget("confirmdialog")
-        summary = self.xml.get_widget("summarylabel")
-
-        num_snap = 0
-        num_rsync = 0
-        for item in self.backuptodelete:
-            if isinstance (item, RsyncBackup):
-                num_rsync+=1
-            else:
-                num_snap+=1
-
-        str = ""
-        if num_rsync != 0 :
-            if num_rsync == 1:
-                str = _("1 external backup will be deleted.")
-            else:
-                str = _("%d external backups will be deleted.") % num_rsync
-
-        if num_snap != 0 :
-            if len(str) != 0:
-                str += "\n"
-            if num_snap == 1:
-                str += _("1 snapshot will be deleted.")
-            else:
-                str += _("%d snapshots will be deleted.") % num_snap
-
-        summary.set_text(str)
-        response = confirm.run()
-        if response != 2:
-            return
-        else:
-            glib.idle_add(self.__init_delete)
-        return
-
-    def __init_scan(self):
-        self.snapscanner = ScanSnapshots()
-        self.pulsedialog.show()
-        self.snapscanner.start()
-        glib.timeout_add(100, self.__monitor_scan)
-        return False
-
-    def __init_delete(self):
-        self.snapdeleter = DeleteSnapshots(self.backuptodelete)
-        # If there's more than a few snapshots, pop up
-        # a progress bar.
-        if len(self.backuptodelete) > 3:
-            self.progressbar.set_fraction(0.0)
-            self.progressdialog.show()
-        self.snapdeleter.start()
-        glib.timeout_add(300, self.__monitor_deletion)
-        return False
-
-    def __monitor_scan(self):
-        if self.snapscanner.isAlive() == True:
-            self.xml.get_widget("pulsebar").pulse()
-            return True
-        else:
-            self.pulsedialog.hide()
-            if self.snapscanner.errors:
-                details = ""
-                dialog = gtk.MessageDialog(None,
-                            0,
-                            gtk.MESSAGE_ERROR,
-                            gtk.BUTTONS_CLOSE,
-                            _("Some snapshots could not be read"))
-                dialog.connect("response",
-                            self.__on_errordialog_response)
-                for error in self.snapscanner.errors:
-                    details = details + error
-                dialog.format_secondary_text(details)
-                dialog.show()
-            self.__on_filterentry_changed(None)
-            return False
-
-    def __monitor_deletion(self):
-        if self.snapdeleter.isAlive() == True:
-            self.progressbar.set_fraction(self.snapdeleter.progress)
-            return True
-        else:
-            self.progressdialog.hide()
-            self.progressbar.set_fraction(1.0)
-            self.progressdialog.hide()
-            if self.snapdeleter.errors:
-                details = ""
-                dialog = gtk.MessageDialog(None,
-                            0,
-                            gtk.MESSAGE_ERROR,
-                            gtk.BUTTONS_CLOSE,
-                            _("Some snapshots could not be deleted"))
-                dialog.connect("response",
-                            self.__on_errordialog_response)
-                for error in self.snapdeleter.errors:
-                    details = details + error
-                dialog.format_secondary_text(details)
-                dialog.show()
-            # If we didn't shortcircuit straight to the delete confirmation
-            # dialog then the main dialog is visible, so we rebuild the list
-            # view.
-            if len(self.shortcircuit) ==  0:
-                self.__refresh_view()
-            else:
-                gtk.main_quit()
-            return False
-
-    def __refresh_view(self):
-        self.liststorefs.clear()
-        glib.idle_add(self.__init_scan)
-        self.backuptodelete = []
-
-    def __add_selection(self, treemodel, path, iter):
-        snapshot = treemodel.get(iter, 6)[0]
-        self.backuptodelete.append(snapshot)
-
-    def __on_confirmcancel_clicked(self, widget):
-        widget.get_toplevel().hide()
-        widget.get_toplevel().response(1)
-
-    def __on_confirmdelete_clicked(self, widget):
-        widget.get_toplevel().hide()
-        widget.get_toplevel().response(2)
-
-    def __on_errordialog_response(self, widget, responseid):
-        widget.hide()
-
-class ScanSnapshots(threading.Thread):
-
-    def __init__(self):
-        threading.Thread.__init__(self)
-        self.errors = []
-        self.datasets = zfs.Datasets()
-        self.snapshots = []
-        self.rsynced_fs = []
-        self.rsynced_backups = []
-
-    def run(self):
-        self.mounts = self.__get_fs_mountpoints()
-        self.rsyncsmf = rsyncsmf.RsyncSMF("%s:rsync" %(plugin.PLUGINBASEFMRI))
-        self.__get_rsync_backups ()
-        self.rescan()
-
-    def __get_rsync_backups (self):
-        # get rsync backup dir
-        self.rsyncsmf = rsyncsmf.RsyncSMF("%s:rsync" %(plugin.PLUGINBASEFMRI))
-        rsyncBaseDir = self.rsyncsmf.get_target_dir()
-        sys,nodeName,rel,ver,arch = os.uname()
-        self.rsyncDir = os.path.join(rsyncBaseDir,
-                                     rsyncsmf.RSYNCDIRPREFIX,
-                                     nodeName)
-        if not os.path.exists(self.rsyncDir):
-            return
-
-        rootBackupDirs = []
-
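-        # A '.time-slider' subdirectory marks the root of a backed-up
-        # file system: prune it from the walk and record the
-        # corresponding RSYNCDIRSUFFIX backup directory.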
-        for root, dirs, files in os.walk(self.rsyncDir):
-            if '.time-slider' in dirs:
-                dirs.remove('.time-slider')
-                backupDir = os.path.join(root, rsyncsmf.RSYNCDIRSUFFIX)
-                if os.path.exists(backupDir):
-                    insort(rootBackupDirs, os.path.abspath(backupDir))
-
-        for dirName in rootBackupDirs:
-            os.chdir(dirName)
-            for d in os.listdir(dirName):
-                if os.path.isdir(d) and not os.path.islink(d):
-                    s1 = dirName.split ("%s/" % self.rsyncDir, 1)
-                    s2 = s1[1].split ("/%s" % rsyncsmf.RSYNCDIRSUFFIX, 1)
-                    fs = s2[0]
-
-                    rb = RsyncBackup ("%s/%s" %(dirName, d),
-                                      self.rsyncDir,
-                                      fs,
-                                      d,
-                                      os.stat(d).st_mtime)
-                    self.rsynced_backups.append (rb)
-
-    def __get_fs_mountpoints(self):
-        """Returns a dictionary mapping:
-           {filesystem : mountpoint}"""
-        result = {}
-        for filesys,mountpoint in self.datasets.list_filesystems():
-            result[filesys] = mountpoint
-        return result
-
-    def rescan(self):
-        cloned = self.datasets.list_cloned_snapshots()
-        self.snapshots = []
-        snaplist = self.datasets.list_snapshots()
-        for snapname,snaptime in snaplist:
-            # Filter out snapshots that are the root
-            # of cloned filesystems or volumes
-            if snapname not in cloned:
-                snapshot = zfs.Snapshot(snapname, snaptime)
-                self.snapshots.append(snapshot)
-
-class DeleteSnapshots(threading.Thread):
-
-    def __init__(self, snapshots):
-        threading.Thread.__init__(self)
-        self.backuptodelete = snapshots
-        self.started = False
-        self.completed = False
-        self.progress = 0.0
-        self.errors = []
-
-    def run(self):
-        deleted = 0
-        self.started = True
-        total = len(self.backuptodelete)
-        for backup in self.backuptodelete:
-            # The backup could have expired and been automatically
-            # destroyed since the user selected it. Check that it
-            # still exists before attempting to delete it. If it
-            # doesn't exist just silently ignore it.
-            if backup.exists():
-                try:
-                    backup.destroy ()
-                except RuntimeError, inst:
-                    self.errors.append(str(inst))
-            deleted += 1
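-            # Multiplying by 1.0 forces float division under Python 2,
-            # yielding a fractional progress value between 0.0 and 1.0.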
-            self.progress = deleted / (total * 1.0)
-        self.completed = True
-
-def main(argv):
-    try:
-        opts,args = getopt.getopt(sys.argv[1:], "", [])
-    except getopt.GetoptError:
-        sys.exit(2)
-    rbacp = RBACprofile()
-    if os.geteuid() == 0:
-        if len(args) > 0:
-            manager = DeleteSnapManager(args)
-        else:
-            manager = DeleteSnapManager()
-        gtk.gdk.threads_enter()
-        glib.idle_add(manager.initialise_view)
-        gtk.main()
-        gtk.gdk.threads_leave()
-    elif os.path.exists(argv) and os.path.exists("/usr/bin/gksu"):
-        # Run via gksu, which will prompt for the root password
-        newargs = ["gksu", argv]
-        for arg in args:
-            newargs.append(arg)
-        os.execv("/usr/bin/gksu", newargs);
-        # Shouldn't reach this point
-        sys.exit(1)
-    else:
-        dialog = gtk.MessageDialog(None,
-                                   0,
-                                   gtk.MESSAGE_ERROR,
-                                   gtk.BUTTONS_CLOSE,
-                                   _("Insufficient Priviliges"))
-        dialog.format_secondary_text(_("Snapshot deletion requires "
-                                       "administrative privileges to run. "
-                                       "You have not been assigned the necessary"
-                                       "administrative priviliges."
-                                       "\n\nConsult your system administrator "))
-        dialog.run()
-        print argv + "is not a valid executable path"
-        sys.exit(1)
diff --git a/usr/share/time-slider/lib/time_slider/setupgui.py~ b/usr/share/time-slider/lib/time_slider/setupgui.py~
deleted file mode 100755 (executable)
index b3a37b6..0000000
+++ /dev/null
@@ -1,1322 +0,0 @@
-#!/usr/bin/python2.6
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-import sys
-import os
-import subprocess
-import threading
-import util
-import smf
-from autosnapsmf import enable_default_schedules, disable_default_schedules
-
-from os.path import abspath, dirname, join, pardir
-sys.path.insert(0, join(dirname(__file__), pardir, "plugin"))
-import plugin
-sys.path.insert(0, join(dirname(__file__), pardir, "plugin", "rsync"))
-import rsyncsmf
-
-try:
-    import pygtk
-    pygtk.require("2.4")
-except:
-    pass
-try:
-    import gtk
-    import gtk.glade
-    gtk.gdk.threads_init()
-except:
-    sys.exit(1)
-
-import glib
-import gobject
-import gio
-import dbus
-import dbus.service
-import dbus.mainloop
-import dbus.mainloop.glib
-import dbussvc
-
-
-# This is the rough guess ratio used for rsync backup device size
-# vs. the total size of the pools it's expected to back up.
-RSYNCTARGETRATIO = 2
-
-# Here we define the path constants so that other modules can use them.
-# This allows us to access the shared files without having to know the
-# actual location; we just use the location of the current file and
-# paths relative to that.
-SHARED_FILES = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                               os.path.pardir,
-                               os.path.pardir))
-LOCALE_PATH = os.path.join('/usr', 'share', 'locale')
-RESOURCE_PATH = os.path.join(SHARED_FILES, 'res')
-
-# The name of the gettext domain. Because our translation files are not
-# in a global folder this doesn't really matter; setting it to the
-# application name is a good idea, though.
-GETTEXT_DOMAIN = 'time-slider'
-
-# set up the glade gettext system and locales
-gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
-gtk.glade.textdomain(GETTEXT_DOMAIN)
-
-import zfs
-from timeslidersmf import TimeSliderSMF
-from rbac import RBACprofile
-
-
-class FilesystemIntention:
-
-    def __init__(self, name, selected, inherited):
-        self.name = name
-        self.selected = selected
-        self.inherited = inherited
-
-    def __str__(self):
-        return_string = "Filesystem name: " + self.name + \
-                "\n\tSelected: " + str(self.selected) + \
-                "\n\tInherited: " + str(self.inherited)
-        return return_string
-
-    def __eq__(self, other):
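-        # Two intentions are equal if both inherit (the effective value
-        # then comes from the parent) or if both the selected and
-        # inherited flags match exactly.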
-        if self.name != other.name:
-            return False
-        if self.inherited and other.inherited:
-            return True
-        elif not self.inherited and other.inherited:
-            return False
-        if (self.selected == other.selected) and \
-           (self.inherited == other.inherited):
-            return True
-        else:
-            return False
-
-class SetupManager:
-
-    def __init__(self, execpath):
-        self._execPath = execpath
-        self._datasets = zfs.Datasets()
-        self._xml = gtk.glade.XML("%s/../../glade/time-slider-setup.glade" \
-                                  % (os.path.dirname(__file__)))
-
-        # Tell dbus to use the gobject mainloop for async ops
-        dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
-        dbus.mainloop.glib.threads_init()
-
-        # Register a bus name with the system dbus daemon
-        systemBus = dbus.SystemBus()
-        busName = dbus.service.BusName("org.opensolaris.TimeSlider.config",
-                                       systemBus)
-        self._dbus = dbussvc.Config(systemBus,
-                                    '/org/opensolaris/TimeSlider/config')
-        # Used later to trigger a D-Bus notification when selected
-        # configuration changes are made.
-        self._configNotify = False
-
-        # These variables record the initial UI state, which is compared
-        # against the UI state when the OK or Cancel button is clicked so
-        # that only the minimum set of necessary configuration changes is
-        # applied. This prevents minor changes from taking ages to apply.
-        self._initialEnabledState = None
-        self._initialRsyncState = None
-        self._initialRsyncTargetDir = None
-        self._initialCleanupLevel = None
-        self._initialCustomSelection = False
-        self._initialSnapStateDic = {}
-        self._initialRsyncStateDic = {}
-        self._initialFsIntentDic = {}
-        self._initialRsyncIntentDic = {}
-
-        # Currently selected rsync backup device via the GUI.
-        self._newRsyncTargetDir = None
-        # Used to store GUI filesystem selection state and the
-        # set of intended properties to apply to zfs filesystems.
-        self._snapStateDic = {}
-        self._rsyncStateDic = {}
-        self._fsIntentDic = {}
-        self._rsyncIntentDic = {}
-        # Dictionary that maps device ID numbers to zfs filesystem objects
-        self._fsDevices = {}
-
-        topLevel = self._xml.get_widget("toplevel")
-        self._pulseDialog = self._xml.get_widget("pulsedialog")
-        self._pulseDialog.set_transient_for(topLevel)
-        
-        # gio.VolumeMonitor reference
-        self._vm = gio.volume_monitor_get()
-        self._vm.connect("mount-added", self._mount_added)
-        self._vm.connect("mount-removed" , self._mount_removed)
-
-        self._fsListStore = gtk.ListStore(bool,
-                                         bool,
-                                         str,
-                                         str,
-                                         gobject.TYPE_PYOBJECT)
-        filesystems = self._datasets.list_filesystems()
-        for fsname,fsmountpoint in filesystems:
-            if (fsmountpoint == "legacy"):
-                mountpoint = _("Legacy")
-            else:
-                mountpoint = fsmountpoint
-            fs = zfs.Filesystem(fsname, fsmountpoint)
-            # Note that we don't deal with legacy mountpoints.
-            if fsmountpoint != "legacy" and fs.is_mounted():
-                self._fsDevices[os.stat(fsmountpoint).st_dev] = fs
-            snap = fs.get_auto_snap()
-            rsyncstr = fs.get_user_property(rsyncsmf.RSYNCFSTAG)
-            if rsyncstr == "true":
-                rsync = True
-            else:
-                rsync = False
-            # Rsync is only performed on snapshotted filesystems, so
-            # treat rsync as False unless auto-snapshot is also enabled
-            # (hence the bitwise AND of the two booleans below).
-            self._fsListStore.append([snap, snap & rsync,
-                                     mountpoint, fs.name, fs])
-            self._initialSnapStateDic[fs.name] = snap
-            self._initialRsyncStateDic[fs.name] = snap & rsync
-        del filesystems
-
-        for fsname in self._initialSnapStateDic:
-            self._refine_filesys_actions(fsname,
-                                         self._initialSnapStateDic,
-                                         self._initialFsIntentDic)
-            self._refine_filesys_actions(fsname,
-                                         self._initialRsyncStateDic,
-                                         self._initialRsyncIntentDic)
-   
-        self._fsTreeView = self._xml.get_widget("fstreeview")
-        self._fsTreeView.set_sensitive(False)
-        self._fsTreeView.set_size_request(10, 200)
-
-        self._fsTreeView.set_model(self._fsListStore)
-
-        cell0 = gtk.CellRendererToggle()
-        cell1 = gtk.CellRendererToggle()
-        cell2 = gtk.CellRendererText()
-        cell3 = gtk.CellRendererText()
-        radioColumn = gtk.TreeViewColumn(_("Select"),
-                                             cell0, active=0)
-        self._fsTreeView.insert_column(radioColumn, 0)
-
-        self._rsyncRadioColumn = gtk.TreeViewColumn(_("Replicate"),
-                                                    cell1, active=1)
-        nameColumn = gtk.TreeViewColumn(_("Mount Point"),
-                                        cell2, text=2)
-        self._fsTreeView.insert_column(nameColumn, 2)
-        mountPointColumn = gtk.TreeViewColumn(_("File System Name"),
-                                              cell3, text=3)
-        self._fsTreeView.insert_column(mountPointColumn, 3)
-        cell0.connect('toggled', self._row_toggled)
-        cell1.connect('toggled', self._rsync_cell_toggled)
-        advancedBox = self._xml.get_widget("advancedbox")
-        advancedBox.connect('unmap', self._advancedbox_unmap)  
-
-        self._rsyncSMF = rsyncsmf.RsyncSMF("%s:rsync" \
-                                          %(plugin.PLUGINBASEFMRI))
-        state = self._rsyncSMF.get_service_state()
-        self._initialRsyncTargetDir = self._rsyncSMF.get_target_dir()
-        # Check for the default, unset value of "" from SMF.
-        if self._initialRsyncTargetDir == '""':
-            self._initialRsyncTargetDir = ''
-        self._newRsyncTargetDir = self._initialRsyncTargetDir
-        self._smfTargetKey = self._rsyncSMF.get_target_key()
-        self._newRsyncTargetSelected = False
-        # Avoid unpacking into a variable named "sys", which would
-        # shadow the sys module used later in this method.
-        sysname, self._nodeName, rel, ver, arch = os.uname()
-
-        # Model columns:
-        # 0 Themed icon list (python list)
-        # 1 device root
-        # 2 volume name
-        # 3 Is gio.Mount device
-        # 4 Is separator (for comboBox separator rendering)
-        self._rsyncStore = gtk.ListStore(gobject.TYPE_PYOBJECT,
-                                         gobject.TYPE_STRING,
-                                         gobject.TYPE_STRING,
-                                         gobject.TYPE_BOOLEAN,
-                                         gobject.TYPE_BOOLEAN)
-        self._rsyncCombo = self._xml.get_widget("rsyncdevcombo")
-        mounts = self._vm.get_mounts()
-        for mount in mounts:
-            self._mount_added(self._vm, mount)
-        if len(mounts) > 0:
-            # Add a separator
-            self._rsyncStore.append((None, None, None, None, True))
-        del mounts
-
-        if len(self._newRsyncTargetDir) == 0:
-            self._rsyncStore.append((['folder'],
-                                    _("(None)"),
-                                    '',
-                                    False,
-                                    False))
-            # Add a separator
-            self._rsyncStore.append((None, None, None, None, True))
-        self._rsyncStore.append((None, _("Other..."), "Other", False, False))
-        self._iconCell = gtk.CellRendererPixbuf()
-        self._nameCell = gtk.CellRendererText()
-        self._rsyncCombo.clear()
-        self._rsyncCombo.pack_start(self._iconCell, False)
-        self._rsyncCombo.set_cell_data_func(self._iconCell,
-                                            self._icon_cell_render)
-        self._rsyncCombo.pack_end(self._nameCell)
-        self._rsyncCombo.set_attributes(self._nameCell, text=1)
-        self._rsyncCombo.set_row_separator_func(self._row_separator)
-        self._rsyncCombo.set_model(self._rsyncStore)
-        self._rsyncCombo.connect("changed", self._rsync_combo_changed)
-        # Force selection of currently configured device
-        self._rsync_dev_selected(self._newRsyncTargetDir)
-
-        # signal dictionary    
-        dic = {"on_ok_clicked" : self._on_ok_clicked,
-               "on_cancel_clicked" : gtk.main_quit,
-               "on_snapshotmanager_delete_event" : gtk.main_quit,
-               "on_enablebutton_toggled" : self._on_enablebutton_toggled,
-               "on_rsyncbutton_toggled" : self._on_rsyncbutton_toggled,
-               "on_defaultfsradio_toggled" : self._on_defaultfsradio_toggled,
-               "on_selectfsradio_toggled" : self._on_selectfsradio_toggled,
-               "on_deletesnapshots_clicked" : self._on_deletesnapshots_clicked}
-        self._xml.signal_autoconnect(dic)
-
-        if state != "disabled":
-            self._rsyncEnabled = True
-            self._xml.get_widget("rsyncbutton").set_active(True)
-            self._initialRsyncState = True
-        else:
-            self._rsyncEnabled = False
-            self._rsyncCombo.set_sensitive(False)
-            self._initialRsyncState = False
-
-        # Initialise SMF service instance state.
-        try:
-            self._sliderSMF = TimeSliderSMF()
-        except RuntimeError,message:
-            self._xml.get_widget("toplevel").set_sensitive(False)
-            dialog = gtk.MessageDialog(self._xml.get_widget("toplevel"),
-                                       0,
-                                       gtk.MESSAGE_ERROR,
-                                       gtk.BUTTONS_CLOSE,
-                                       _("Snapshot manager service error"))
-            dialog.format_secondary_text(_("The snapshot manager service does "
-                                         "not appear to be installed on this "
-                                         "system."
-                                         "\n\nSee the svcs(1) man page for more "
-                                         "information."
-                                         "\n\nDetails:\n%s")%(message))
-            dialog.set_icon_name("time-slider-setup")
-            dialog.run()
-            sys.exit(1)
-
-        if self._sliderSMF.svcstate == "disabled":
-            self._xml.get_widget("enablebutton").set_active(False)
-            self._initialEnabledState = False
-        elif self._sliderSMF.svcstate == "offline":
-            self._xml.get_widget("toplevel").set_sensitive(False)
-            errors = ''.join("%s\n" % (error) for error in \
-                self._sliderSMF.find_dependency_errors())
-            dialog = gtk.MessageDialog(self._xml.get_widget("toplevel"),
-                                        0,
-                                        gtk.MESSAGE_ERROR,
-                                        gtk.BUTTONS_CLOSE,
-                                        _("Snapshot manager service dependency error"))
-            dialog.format_secondary_text(_("The snapshot manager service has "
-                                            "been placed offline due to a dependency "
-                                            "problem. The following dependency problems "
-                                            "were found:\n\n%s\n\nRun \"svcs -xv\" from "
-                                            "a command prompt for more information about "
-                                            "these dependency problems.") % errors)
-            dialog.set_icon_name("time-slider-setup")
-            dialog.run()
-            sys.exit(1)
-        elif self._sliderSMF.svcstate == "maintenance":
-            self._xml.get_widget("toplevel").set_sensitive(False)
-            dialog = gtk.MessageDialog(self._xml.get_widget("toplevel"),
-                                        0,
-                                        gtk.MESSAGE_ERROR,
-                                        gtk.BUTTONS_CLOSE,
-                                        _("Snapshot manager service error"))
-            dialog.format_secondary_text(_("The snapshot manager service has "
-                                            "encountered a problem and has been "
-                                            "disabled until the problem is fixed."
-                                            "\n\nSee the svcs(1) man page for more "
-                                            "information."))
-            dialog.set_icon_name("time-slider-setup")
-            dialog.run()
-            sys.exit(1)
-        else:
-            # FIXME: Check transitional states 
-            self._xml.get_widget("enablebutton").set_active(True)
-            self._initialEnabledState = True
-
-
-        # Emit a toggled signal so that the initial GUI state is consistent
-        self._xml.get_widget("enablebutton").emit("toggled")
-        # Check the snapshotting policy (UserData (default), or Custom)
-        self._initialCustomSelection = self._sliderSMF.is_custom_selection()
-        if self._initialCustomSelection == True:
-            self._xml.get_widget("selectfsradio").set_active(True)
-            # Show the advanced controls so the user can see the
-            # customised configuration.
-            if self._sliderSMF.svcstate != "disabled":
-                self._xml.get_widget("expander").set_expanded(True)
-        else: # "false" or any other non "true" value
-            self._xml.get_widget("defaultfsradio").set_active(True)
-
-        # Set the cleanup threshold value
-        spinButton = self._xml.get_widget("capspinbutton")
-        critLevel = self._sliderSMF.get_cleanup_level("critical")
-        warnLevel = self._sliderSMF.get_cleanup_level("warning")
-
-        # Force the warning level to something practical
-        # on the lower end, and make it no greater than the
-        # critical level specified in the SVC instance.
-        spinButton.set_range(70, critLevel)
-        self._initialCleanupLevel = warnLevel
-        if warnLevel > 70:
-            spinButton.set_value(warnLevel)
-        else:
-            spinButton.set_value(70)
-
-    def _icon_cell_render(self, celllayout, cell, model, iter):
-        iconList = self._rsyncStore.get_value(iter, 0)
-        if iconList != None:
-            gicon = gio.ThemedIcon(iconList)
-            cell.set_property("gicon", gicon)
-        else:
-            root = self._rsyncStore.get_value(iter, 2)
-            if root == "Other":
-                cell.set_property("gicon", None)
-
-    def _row_separator(self, model, iter):
-        return model.get_value(iter, 4)
-
-    def _mount_added(self, volume_monitor, mount):
-        icon = mount.get_icon()
-        iconList = icon.get_names()
-        if iconList == None:
-            iconList = ['drive-harddisk', 'drive']
-        root = mount.get_root()
-        path = root.get_path()
-        mountName = mount.get_name()
-        volume = mount.get_volume()
-        if volume == None:
-            volName = mount.get_name()
-            if volName == None:
-                volName = os.path.split(path)[1]
-        else:
-            volName = volume.get_name()
-
-        # Check to see if there is at least one gio.Mount device already
-        # in the ListStore. If not, then we also need to add a separator
-        # row.
-        iter = self._rsyncStore.get_iter_first()
-        if iter and self._rsyncStore.get_value(iter, 3) == False:
-            self._rsyncStore.insert(0, (None, None, None, None, True))
-        
-        self._rsyncStore.insert(0, (iconList, volName, path, True, False))
-        # If this happens to be the already configured backup device
-        # and the user hasn't tried to change device yet, auto select
-        # it.
-        if self._initialRsyncTargetDir == self._newRsyncTargetDir:
-            if self._validate_rsync_target(path) == True:
-                self._rsyncCombo.set_active(0)
-
-    def _mount_removed(self, volume_monitor, mount):
-        root = mount.get_root()
-        path = root.get_path()
-        iter = self._rsyncStore.get_iter_first()
-        mountIter = None
-        numMounts = 0
-        # Search gio.Mount devices
-        while iter != None and \
-            self._rsyncStore.get_value(iter, 3) == True:
-            numMounts += 1
-            compPath = self._rsyncStore.get_value(iter, 2)
-            if compPath == path:
-                mountIter = iter
-                break
-            else:
-                iter = self._rsyncStore.iter_next(iter)
-        if mountIter != None:
-            if numMounts == 1:
-                # Need to remove the separator also since
-                # there will be no more gio.Mount devices
-                # shown in the combo box
-                sepIter = self._rsyncStore.iter_next(mountIter)
-                if self._rsyncStore.get_value(sepIter, 4) == True:
-                    self._rsyncStore.remove(sepIter)                  
-            self._rsyncStore.remove(mountIter)
-            iter = self._rsyncStore.get_iter_first()
-            # Insert a custom folder if none exists already
-            if self._rsyncStore.get_value(iter, 2) == "Other":
-                path = self._initialRsyncTargetDir
-                length = len(path)
-                if length > 1:
-                    name = os.path.split(path)[1]
-                elif length == 1:
-                    name = path
-                else: # Indicates path is unset: ''
-                    name = _("(None)")
-                iter = self._rsyncStore.insert_before(iter,
-                                                      (None,
-                                                       None,
-                                                       None,
-                                                       None,
-                                                       True))
-                iter = self._rsyncStore.insert_before(iter,
-                                                      (['folder'],
-                                                       name,
-                                                       path,
-                                                       False,
-                                                       False))
-            self._rsyncCombo.set_active_iter(iter)
-
-    def _monitor_setup(self, pulseBar):
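-        # glib timeout callback: pulse the bar while the enabler thread
-        # is alive. Falling through returns None (falsy), which cancels
-        # the timeout after the main loop has been asked to quit.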
-        if self._enabler.isAlive() == True:
-            pulseBar.pulse()
-            return True
-        else:
-            gtk.main_quit()   
-
-    def _row_toggled(self, renderer, path):
-        model = self._fsTreeView.get_model()
-        iter = model.get_iter(path)
-        state = renderer.get_active()
-        if state == False:
-            self._fsListStore.set_value(iter, 0, True)
-        else:
-            self._fsListStore.set_value(iter, 0, False)
-            self._fsListStore.set_value(iter, 1, False)
-
-    def _rsync_cell_toggled(self, renderer, path):
-        model = self._fsTreeView.get_model()
-        iter = model.get_iter(path)
-        state = renderer.get_active()
-        rowstate = self._fsListStore.get_value(iter, 0)
-        if rowstate == True:
-            if state == False:
-                self._fsListStore.set_value(iter, 1, True)
-            else:
-                self._fsListStore.set_value(iter, 1, False)
-
-    def _rsync_config_error(self, msg):
-        topLevel = self._xml.get_widget("toplevel")
-        dialog = gtk.MessageDialog(topLevel,
-                                    0,
-                                    gtk.MESSAGE_ERROR,
-                                    gtk.BUTTONS_CLOSE,
-                                    _("Unsuitable Backup Location"))
-        dialog.format_secondary_text(msg)
-        dialog.set_icon_name("time-slider-setup")
-        dialog.run()
-        dialog.hide()
-        return
-
-    def _rsync_dev_selected(self, path):
-        iter = self._rsyncStore.get_iter_first()
-        while iter != None:
-            # Break out when we hit a non gio.Mount device
-            if self._rsyncStore.get_value(iter, 3) == False:
-                break
-            compPath = self._rsyncStore.get_value(iter, 2)
-            if compPath == path:
-                self._rsyncCombo.set_active_iter(iter)
-                self._newRsyncTargetDir = path
-                return
-            else:
-                iter = self._rsyncStore.iter_next(iter)
-
-        # Not one of the shortcut RMM devices, so it's
-        # some other path on the filesystem.
-        # iter may be pointing at a separator. Increment
-        # to next row iter if so.
-        if self._rsyncStore.get_value(iter, 4) == True:
-            iter = self._rsyncStore.iter_next(iter)
-
-        if iter != None:
-            if len(path) > 1:
-                name = os.path.split(path)[1]
-            elif len(path) == 1:
-                name = path
-            else: # Indicates path is unset: ''
-                name = _("(None)")
-            # Could be either the custom folder selection
-            # row or the  "Other" row if the custom row
-            # was not created. If "Other" then create the
-            # custom row and separator now at this position
-            if self._rsyncStore.get_value(iter, 2) == "Other":
-                iter = self._rsyncStore.insert_before(iter,
-                                                      (None,
-                                                       None,
-                                                       None,
-                                                       None,
-                                                       True))
-                iter = self._rsyncStore.insert_before(iter,
-                                                      (['folder'],
-                                                       name,
-                                                       path,
-                                                       False,
-                                                       False))
-            else:
-                self._rsyncStore.set(iter,
-                                     1, name,
-                                     2, path)
-            self._rsyncCombo.set_active_iter(iter)
-            self._newRsyncTargetDir = path
-
-    def _rsync_combo_changed(self, combobox):
-        newIter = combobox.get_active_iter()
-        if newIter != None:
-            root = self._rsyncStore.get_value(newIter, 2)
-            if root != "Other":
-                self._newRsyncTargetDir = root
-            else:
-                msg = _("Select A Back Up Device")
-                fileDialog = \
-                    gtk.FileChooserDialog(
-                        msg,
-                        self._xml.get_widget("toplevel"),
-                        gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
-                        (gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,
-                        gtk.STOCK_OK,gtk.RESPONSE_OK),
-                        None)
-                self._rsyncCombo.set_sensitive(False)
-                response = fileDialog.run()
-                fileDialog.hide()
-                if response == gtk.RESPONSE_OK:
-                    gFile = fileDialog.get_file()
-                    self._rsync_dev_selected(gFile.get_path())
-                else:
-                    self._rsync_dev_selected(self._newRsyncTargetDir)
-                self._rsyncCombo.set_sensitive(True)
-
-    def _rsync_size_warning(self, zpools, zpoolSize,
-                             rsyncTarget, targetSize):
-        # Using decimal "GB" instead of binary "GiB"
-        KB = 1000
-        MB = 1000 * KB
-        GB = 1000 * MB
-        TB = 1000 * GB
-
-        suggestedSize = RSYNCTARGETRATIO * zpoolSize
-        if suggestedSize > TB:
-            sizeStr = "%.1f TB" % round(suggestedSize / float(TB), 1)
-        elif suggestedSize > GB:
-            sizeStr = "%.1f GB" % round(suggestedSize / float(GB), 1)
-        else:
-            sizeStr = "%.1f MB" % round(suggestedSize / float(MB), 1)
-
-        if targetSize > TB:
-            targetStr = "%.1f TB" % round(targetSize / float(TB), 1)
-        elif targetSize > GB:
-            targetStr = "%.1f GB" % round(targetSize / float(GB), 1)
-        else:
-            targetStr = "%.1f MB" % round(targetSize / float(MB), 1)
-
-
-        msg = _("Time Slider suggests a device with a capacity of at "
-                "least <b>%s</b>.\n"
-                "The device: \'<b>%s</b>\'\nonly has <b>%s</b>\n"
-                "Do you want to use it anyway?") \
-                % (sizeStr, rsyncTarget, targetStr)
-
-        topLevel = self._xml.get_widget("toplevel")
-        dialog = gtk.MessageDialog(topLevel,
-                                   0,
-                                   gtk.MESSAGE_QUESTION,
-                                   gtk.BUTTONS_YES_NO,
-                                   _("Time Slider"))
-        dialog.set_default_response(gtk.RESPONSE_NO)
-        dialog.set_transient_for(topLevel)
-        dialog.set_markup(msg)
-        dialog.set_icon_name("time-slider-setup")
-
-        response = dialog.run()
-        dialog.hide()
-        if response == gtk.RESPONSE_YES:
-            return True
-        else:
-            return False
-
-    def _check_rsync_config(self):
-        """
-           Checks the rsync configuration, including filesystem
-           selection, target directory validation and capacity checks.
-           Returns True if everything is OK, otherwise False.
-           Pops up blocking error dialogs to notify users of error
-           conditions before returning.
-        """
-        def _get_mount_point(path):
-            if os.path.ismount(path):
-                return path
-            else:
-                return _get_mount_point(abspath(join(path, pardir)))
-
-        if self._rsyncEnabled != True:
-            return True
-
-        if len(self._newRsyncTargetDir) == 0:
-            msg = _("No backup device was selected.\n"
-                    "Please select an empty device.")
-            self._rsync_config_error(msg)
-            return False
-        # There's little that can be done if the device is from a
-        # previous configuration and currently offline. So just 
-        # treat it as being OK based on the assumption that it was
-        # previously deemed to be OK.
-        if self._initialRsyncTargetDir == self._newRsyncTargetDir and \
-           not os.path.exists(self._newRsyncTargetDir):
-            return True
-        # Perform the required validation checks on the
-        # target directory.
-        newTargetDir = self._newRsyncTargetDir
-
-        # We require the whole device. So find the enclosing
-        # mount point and inspect from there.
-        targetMountPoint = abspath(_get_mount_point(newTargetDir))
-
-        # Check that it's writable.
-        f = None
-        testFile = os.path.join(targetMountPoint, ".ts-test")
-        try:
-            f = open(testFile, 'w')
-        except (OSError, IOError):
-            msg = _("\'%s\'\n"
-                    "is not writable. The backup device must "
-                    "be writable by the system administrator." 
-                    "\n\nPlease use a different device.") \
-                    % (targetMountPoint)
-            self._rsync_config_error(msg)
-            return False
-        f.close()
-
-        # Try to create a hard link. Rsync requires this to do
-        # incremental backups, and it confirms that the file system is
-        # POSIX-like enough to correctly set file ownership and perms.
-        os.chdir(targetMountPoint)
-        try:
-            os.link(testFile, ".ts-test-link")
-        except OSError:
-            msg = _("\'%s\'\n"
-                    "contains an incompatible file system. " 
-                    "The selected device must have a Unix "
-                    "style file system that supports file "
-                    "linking, such as UFS"
-                    "\n\nPlease use a different device.") \
-                    % (targetMountPoint)
-            self._rsync_config_error(msg)
-            return False
-        finally:
-            os.unlink(testFile)
-        os.unlink(".ts-test-link")
-
-        # Check that selected directory is either empty
-        # or already preconfigured as a backup target
-        sysname, nodeName, rel, ver, arch = os.uname()
-        basePath = os.path.join(targetMountPoint,
-                                rsyncsmf.RSYNCDIRPREFIX)
-        nodePath = os.path.join(basePath,
-                                nodeName)
-        configPath = os.path.join(basePath,
-                                    rsyncsmf.RSYNCCONFIGFILE)
-        self._newRsyncTargetSelected = True
-        targetDirKey = None
-
-        contents = os.listdir(targetMountPoint)
-        os.chdir(targetMountPoint)
-
-        # Apart from our own prefix directory, the only other entry
-        # tolerated in an otherwise empty target is "lost+found".
-        for item in contents:
-            if (item != rsyncsmf.RSYNCDIRPREFIX and \
-                item != "lost+found") or \
-               not os.path.isdir(item) or \
-               os.path.islink(item):
-                msg = _("\'%s\'\n is not an empty device.\n\n"
-                        "Please select an empty device.") \
-                        % (newTargetDir)
-                self._rsync_config_error(msg)
-                return False
-
-        # Validate existing directory structure
-        if os.path.exists(basePath):
-            # We only accept a pre-existing directory if:
-            # 1. It has a config key that matches the one stored by
-            #    the rsync plugin's SMF configuration, and
-            # 2. It has a single subfolder that matches the nodename
-            #    of this system.
-
-            # Check for previous config key
-            if os.path.exists(configPath):
-                f = open(configPath, 'r')
-                for line in f.readlines():
-                    key, val = line.strip().split('=')
-                    if key.strip() == "target_key":
-                        targetDirKey = val.strip()
-                        break
-                f.close()
-
-            # Examine anything else in the directory
-            self._targetSelectionError = None
-            dirList = [d for d in os.listdir(basePath) if
-                        d != '.rsync-config']
-            os.chdir(basePath)
-            if len(dirList) > 0:
-                msg = _("\'%s\'\n is not an empty device.\n\n"
-                        "Please select an empty device.") \
-                        % (newTargetDir)
-                # No config key or > 1 directory:
-                # User specified a non empty directory.
-                if targetDirKey == None or len(dirList) > 1:
-                    self._rsync_config_error(msg)
-                    return False
-                # Make sure the single item is not a file or symlink.
-                elif os.path.islink(dirList[0]) or \
-                        os.path.isfile(dirList[0]):
-                    self._rsync_config_error(msg)
-                    return False
-                else:
-                    # Has one other item and a config key. The item
-                    # must be a directory matching the system nodename,
-                    # and the key must match SMF's stored value.
-                    if dirList[0] != nodeName and \
-                        targetDirKey != self._smfTargetKey:
-                        msg = _("\'%s\'\n"
-                                "is a Time Slider external backup device "
-                                "that is already in use by another system. "
-                                "Backup devices may not be shared between "
-                                "systems." 
-                                "\n\nPlease use a different device.") \
-                                % (newTargetDir)
-                        self._rsync_config_error(msg)                                
-                        return False
-                    else:
-                        if dirList[0] == nodeName and \
-                           targetDirKey != self._smfTargetKey:
-                            # Looks like a device that we previously used,
-                            # but discontinued using in favour of some other
-                            # device.
-                            msg = _("\'<b>%s</b>\' appears to be a a device "
-                                    "previously configured for use by this "
-                                    "system.\n\nDo you want resume use of "
-                                    "this device for backups?") \
-                                    % (newTargetDir)
-
-                            topLevel = self._xml.get_widget("toplevel")
-                            dialog = gtk.MessageDialog(topLevel,
-                                                       0,
-                                                       gtk.MESSAGE_QUESTION,
-                                                       gtk.BUTTONS_YES_NO,
-                                                       _("Time Slider"))
-                            dialog.set_default_response(gtk.RESPONSE_NO)
-                            dialog.set_transient_for(topLevel)
-                            dialog.set_markup(msg)
-                            dialog.set_icon_name("time-slider-setup")
-
-                            response = dialog.run()
-                            dialog.hide()
-                            if response == gtk.RESPONSE_NO:
-                                return False
-                        else:
-                            # Appears to be our own pre-configured directory.
-                            self._newRsyncTargetSelected = False
-
-        # Compare device ID against selected ZFS filesystems
-        # and their enclosing zpools. The aim is to avoid a
-        # vicious cycle caused by backing up snapshots onto
-        # the same pool the snapshots originate from.
-        targetDev = os.stat(newTargetDir).st_dev
-        try:
-            fs = self._fsDevices[targetDev]
-            
-            # See if the filesystem itself is selected
-            # and/or any other filesystem on the pool is
-            # selected.
-            fsEnabled = self._snapStateDic[fs.name]
-            if fsEnabled == True:
-                # Definitely can't use this since it's a
-                # snapshotted filesystem.
-                msg = _("\'%s\'\n"
-                        "belongs to the ZFS filesystem \'%s\' "
-                        "which is already selected for "
-                        "regular ZFS snaphots." 
-                        "\n\nPlease select a drive "
-                        "not already in use by "
-                        "Time Slider") \
-                        % (newTargetDir, fs.name)
-                self._rsync_config_error(msg)
-                return False
-            else:
-                # See if there is anything else on the pool being
-                # snapshotted
-                poolName = fs.name.split("/", 1)[0]
-                for name,mount in self._datasets.list_filesystems():
-                    if name.find(poolName) == 0:
-                        try:
-                            otherEnabled = self._snapStateDic[name]
-                            radioBtn = self._xml.get_widget("defaultfsradio")
-                            snapAll = radioBtn.get_active()
-                            if snapAll or otherEnabled:
-                                msg = _("\'%s\'\n"
-                                        "belongs to the ZFS pool \'%s\' "
-                                        "which is already being used "
-                                        "to store ZFS snaphots." 
-                                        "\n\nPlease select a drive "
-                                        "not already in use by "
-                                        "Time Slider") \
-                                        % (newTargetDir, poolName)
-                                self._rsync_config_error(msg)
-                                return False
-                        except KeyError:
-                            pass               
-        except KeyError:
-            # No match found - good.
-            pass
-
-
-        # Figure out if there's a reasonable amount of free space to
-        # store backups. This is a vague guess at best.
-        allPools = zfs.list_zpools()
-        snapPools = []
-        # FIXME -  this is for custom selection. There is a short
-        # circuit case for default (All) configuration. Don't forget
-        # to implement this short circuit.
-        for poolName in allPools:
-            try:
-                snapPools.index(poolName)
-            except ValueError:
-                pool = zfs.ZPool(poolName)
-                # FIXME - we should include volumes here but they
-                # can only be set from the command line, not via
-                # the GUI, so not crucial.
-                for fsName,mount in pool.list_filesystems():
-                    # Don't try to catch exception. The filesystems
-                    # are already populated in self._snapStateDic
-                    enabled = self._snapStateDic[fsName]
-                    if enabled == True:
-                        snapPools.append(poolName)
-                        break
-
-        sumPoolSize = 0
-        for poolName in snapPools:
-            pool = zfs.ZPool(poolName)
-            # Rough calculation, but precise enough for
-            # estimation purposes
-            sumPoolSize += pool.get_used_size()
-            sumPoolSize += pool.get_available_size()
-
-
-        # Compare with available space on rsync target dir
-        targetAvail = util.get_available_size(targetMountPoint)
-        targetUsed = util.get_used_size(targetMountPoint)
-        targetSum = targetAvail + targetUsed
-
-        # Recommended minimum:
-        # At least double the combined size of all pools with
-        # filesystems selected for backup. Variables include
-        # frequency of data changes, how much efficiency rsync
-        # sacrifices compared to ZFS' block-level diff tracking,
-        # and whether compression and/or deduplication are enabled
-        # on the source pool/filesystem.
-        # We don't try to make calculations based on individual
-        # filesystem selection as there are too many unpredictable
-        # variables to make an estimation of any practical use.
-        # Let the user figure that out for themselves.
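-        # For example, with RSYNCTARGETRATIO of 2 and 500 GB of
-        # combined pool capacity selected for backup, a target of at
-        # least 1 TB is suggested.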
-
-        # The most consistent measurement is to use the sum of the
-        # available and used sizes on the target filesystem. Based on
-        # the previous checks, we assume the target device is only
-        # being used for rsync backups, so the used value consists of
-        # existing backups. Available space can be reduced for various
-        # reasons, including the used value increasing or, for
-        # NFS-mounted ZFS filesystems, other ZFS filesystems on the
-        # containing pool using up more space.
-        
-
-        targetPoolRatio = targetSum/float(sumPoolSize)
-        if (targetPoolRatio < RSYNCTARGETRATIO):
-            response = self._rsync_size_warning(snapPools,
-                                                 sumPoolSize,
-                                                 targetMountPoint,
-                                                 targetSum)
-            if response == False:
-                return False
-
-        self._newRsyncTargetDir = targetMountPoint
-        return True
-
-    def _on_ok_clicked(self, widget):
-        # Make sure the dictionaries are empty.
-        self._fsIntentDic = {}
-        self._snapStateDic = {}
-        self._rsyncStateDic = {}
-        enabled = self._xml.get_widget("enablebutton").get_active()
-        self._rsyncEnabled = self._xml.get_widget("rsyncbutton").get_active()
-        if enabled == False:
-            if self._rsyncEnabled == False and \
-               self._initialRsyncState == True:
-                self._rsyncSMF.disable_service()
-            if self._initialEnabledState == True:
-                self._sliderSMF.disable_service()
-            # Ignore other changes to the snapshot/rsync configuration
-            # of filesystems. Just broadcast the change and exit.
-            self._configNotify = True
-            self.broadcast_changes()
-            gtk.main_quit()
-        else:
-            model = self._fsTreeView.get_model()
-            snapalldata = self._xml.get_widget("defaultfsradio").get_active()
-                
-            if snapalldata == True:
-                model.foreach(self._set_fs_selection_state, True)
-                if self._rsyncEnabled == True:
-                    model.foreach(self._set_rsync_selection_state, True)
-            else:
-                model.foreach(self._get_fs_selection_state)
-                model.foreach(self._get_rsync_selection_state)
-            for fsname in self._snapStateDic:
-                self._refine_filesys_actions(fsname,
-                                              self._snapStateDic,
-                                              self._fsIntentDic)
-                if self._rsyncEnabled == True:
-                    self._refine_filesys_actions(fsname,
-                                                  self._rsyncStateDic,
-                                                  self._rsyncIntentDic)
-            if self._rsyncEnabled and \
-               not self._check_rsync_config():
-                return
-
-            self._pulseDialog.show()
-            self._enabler = EnableService(self)
-            self._enabler.start()
-            glib.timeout_add(100,
-                             self._monitor_setup,
-                             self._xml.get_widget("pulsebar"))
-
-    def _on_enablebutton_toggled(self, widget):
-        expander = self._xml.get_widget("expander")
-        enabled = widget.get_active()
-        self._xml.get_widget("filesysframe").set_sensitive(enabled)
-        expander.set_sensitive(enabled)
-        if (enabled == False):
-            expander.set_expanded(False)
-
-    def _on_rsyncbutton_toggled(self, widget):
-        self._rsyncEnabled = widget.get_active()
-        if self._rsyncEnabled == True:
-            self._fsTreeView.insert_column(self._rsyncRadioColumn, 1)
-            self._rsyncCombo.set_sensitive(True)
-        else:
-            self._fsTreeView.remove_column(self._rsyncRadioColumn)
-            self._rsyncCombo.set_sensitive(False)
-
-    def _on_defaultfsradio_toggled(self, widget):
-        if widget.get_active():
-            self._xml.get_widget("fstreeview").set_sensitive(False)
-
-    def _on_selectfsradio_toggled(self, widget):
-        if widget.get_active():
-            self._xml.get_widget("fstreeview").set_sensitive(True)
-
-    def _advancedbox_unmap(self, widget):
-        # Auto-shrink the window by subtracting the frame's height
-        # requisition from the window's height requisition.
-        myrequest = widget.size_request()
-        toplevel = self._xml.get_widget("toplevel")
-        toprequest = toplevel.size_request()
-        toplevel.resize(toprequest[0], toprequest[1] - myrequest[1])
-
-    def _get_fs_selection_state(self, model, path, iter):
-        fsname = self._fsListStore.get_value(iter, 3)    
-        enabled = self._fsListStore.get_value(iter, 0)
-        self._snapStateDic[fsname] = enabled
-
-    def _get_rsync_selection_state(self, model, path, iter):
-        fsname = self._fsListStore.get_value(iter, 3)
-        enabled = self._fsListStore.get_value(iter, 1)
-        self._rsyncStateDic[fsname] = enabled
-
-    def _set_fs_selection_state(self, model, path, iter, selected):
-        fsname = self._fsListStore.get_value(iter, 3)
-        self._snapStateDic[fsname] = selected
-
-    def _set_rsync_selection_state(self, model, path, iter, selected):
-        fsname = self._fsListStore.get_value(iter, 3)
-        self._rsyncStateDic[fsname] = selected
-
-    def _refine_filesys_actions(self, fsname, inputdic, actions):
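-        # Decides, per filesystem, whether the desired setting needs to
-        # be applied locally or can simply be inherited from the parent,
-        # mirroring ZFS property inheritance to keep the number of
-        # explicitly set properties to a minimum.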
-        selected = inputdic[fsname]
-        try:
-            fstag = actions[fsname]
-            # Found so we can skip over.
-        except KeyError:
-            # Need to check parent value to see if
-            # we should set explicitly or just inherit.
-            path = fsname.rsplit("/", 1)
-            parentName = path[0]
-            if parentName == fsname:
-                # Means this filesystem is the root of the pool
-                # so we need to set it explicitly.
-                actions[fsname] = \
-                    FilesystemIntention(fsname, selected, False)
-            else:
-                parentIntent = None
-                inherit = False
-                # Check if parent is already set and if so whether to
-                # inherit or override with a locally set property value.
-                try:
-                    # Parent has already been registered
-                    parentIntent = actions[parentName]
-                except KeyError:
-                    # Parent not yet set, so do that recursively to figure
-                    # out if we need to inherit or set a local property on
-                    # this child filesystem.
-                    self._refine_filesys_actions(parentName,
-                                                  inputdic,
-                                                  actions)
-                    parentIntent = actions[parentName]
-                if parentIntent.selected == selected:
-                    inherit = True
-                actions[fsname] = \
-                    FilesystemIntention(fsname, selected, inherit)
-
-    def _validate_rsync_target(self, path):
-        """
-            Tests path to see if it is the pre-configured
-            rsync backup device path.
-            Returns True on success, otherwise False
-        """
-        # FIXME - this is duplicated in applet.py and rsync-backup.py.
-        # It should be moved into a shared module.
-        if not os.path.exists(path):
-            return False
-        testDir = os.path.join(path,
-                                rsyncsmf.RSYNCDIRPREFIX,
-                                self._nodeName)
-        testKeyFile = os.path.join(path,
-                                    rsyncsmf.RSYNCDIRPREFIX,
-                                    rsyncsmf.RSYNCCONFIGFILE)
-        if os.path.exists(testDir) and \
-            os.path.exists(testKeyFile):
-            targetKey = None
-            f = open(testKeyFile, 'r')
-            for line in f.readlines():
-                key, val = line.strip().split('=')
-                if key.strip() == "target_key":
-                    targetKey = val.strip()
-                    break
-            f.close()
-            if targetKey == self._smfTargetKey:
-                return True
-        return False
-
-
-    def commit_filesystem_selection(self):
-        """
-        Commits the intended filesystem selection actions based on the
-        user's UI configuration to disk. Compares with initial startup
-        configuration and applies the minimum set of necessary changes.
-        """
-        for fsname,fsmountpoint in self._datasets.list_filesystems():
-            fs = zfs.Filesystem(fsname, fsmountpoint)
-            try:
-                initialIntent = self._initialFsIntentDic[fsname]
-                intent = self._fsIntentDic[fsname]
-                if intent == initialIntent:
-                    continue
-                fs.set_auto_snap(intent.selected, intent.inherited)
-
-            except KeyError:
-                pass
-
-    def commit_rsync_selection(self):
-        """
-        Commits the intended rsync selection actions based on the
-        user's UI configuration to disk. Compares with initial startup
-        configuration and applies the minimum set of necessary changes.
-        """
-        for fsname,fsmountpoint in self._datasets.list_filesystems():
-            fs = zfs.Filesystem(fsname, fsmountpoint)
-            try:
-                initialIntent = self._initialRsyncIntentDic[fsname]
-                intent = self._rsyncIntentDic[fsname]
-                if intent == initialIntent:
-                    continue
-                if intent.inherited == True and \
-                    initialIntent.inherited == False:
-                    fs.unset_user_property(rsyncsmf.RSYNCFSTAG)
-                else:
-                    if intent.selected == True:
-                        value = "true"
-                    else:
-                        value = "false"
-                    fs.set_user_property(rsyncsmf.RSYNCFSTAG,
-                                         value)
-            except KeyError:
-                pass
-
-    def setup_rsync_config(self):
-        if self._rsyncEnabled == True:
-            if self._newRsyncTargetSelected == True:
-                # Note: use indexing here; unpacking os.uname() into a
-                # variable named "sys" would shadow the sys module.
-                nodeName = os.uname()[1]
-                basePath = os.path.join(self._newRsyncTargetDir,
-                                        rsyncsmf.RSYNCDIRPREFIX,)
-                nodePath = os.path.join(basePath,
-                                        nodeName)
-                configPath = os.path.join(basePath,
-                                          rsyncsmf.RSYNCCONFIGFILE)
-                newKey = generate_random_key()
-                try:
-                    origmask = os.umask(0222)
-                    if not os.path.exists(nodePath):
-                        os.makedirs(nodePath, 0755)
-                    f = open(configPath, 'w')
-                    f.write("target_key=%s\n" % (newKey))
-                    f.close()
-                    os.umask(origmask)
-                except OSError as e:
-                    self._pulseDialog.hide()
-                    sys.stderr.write("Error configuring external "
-                                     "backup device:\n"
-                                     "%s\n\nReason:\n %s"
-                                     % (self._newRsyncTargetDir, str(e)))
-                    sys.exit(-1)
-                self._rsyncSMF.set_target_dir(self._newRsyncTargetDir)
-                self._rsyncSMF.set_target_key(newKey)
-                # Applet monitors rsyncTargetDir so make sure to notify it.
-                self._configNotify = True
-        return
-
-    def setup_services(self):
-        # Take care of the rsync plugin service first, since time-slider
-        # will query it.
-        # Changes to the rsync or time-slider SMF service state should be
-        # broadcast to let the notification applet refresh.
-        if self._rsyncEnabled == True and \
-            self._initialRsyncState == False:
-            self._rsyncSMF.enable_service()
-            self._configNotify = True
-        elif self._rsyncEnabled == False and \
-            self._initialRsyncState == True:
-            self._rsyncSMF.disable_service()
-            self._configNotify = True
-        customSelection = self._xml.get_widget("selectfsradio").get_active()
-        if customSelection != self._initialCustomSelection:
-            self._sliderSMF.set_custom_selection(customSelection)
-        if self._initialEnabledState == False:
-            enable_default_schedules()
-            self._sliderSMF.enable_service()
-            self._configNotify = True
-
-    def set_cleanup_level(self):
-        """
-        Wrapper function to set the warning level cleanup threshold
-        value as a percentage of pool capacity.
-        """
-        level = self._xml.get_widget("capspinbutton").get_value_as_int()
-        if level != self._initialCleanupLevel:
-            self._sliderSMF.set_cleanup_level("warning", level)
-
-    def broadcast_changes(self):
-        """
-        Blunt instrument to notify D-Bus listeners, such as the
-        notification applet, to rescan service configuration
-        """
-        if self._configNotify == False:
-            return
-        self._dbus.config_changed()
-
-    def _on_deletesnapshots_clicked(self, widget):
-        cmdpath = os.path.join(os.path.dirname(self._execPath), \
-                               "../lib/time-slider-delete")
-        p = subprocess.Popen(cmdpath, close_fds=True)
-
-
-class EnableService(threading.Thread):
-
-    def __init__(self, setupManager):
-        threading.Thread.__init__(self)
-        self._setupManager = setupManager
-
-    def run(self):
-        try:
-            # Set the service state last so that the ZFS filesystems
-            # are correctly tagged before the snapshot scripts check them
-            self._setupManager.commit_filesystem_selection()
-            self._setupManager.commit_rsync_selection()
-            self._setupManager.set_cleanup_level()
-            self._setupManager.setup_rsync_config()
-            self._setupManager.setup_services()
-            self._setupManager.broadcast_changes()
-        except RuntimeError, message:
-            sys.stderr.write(str(message))
-
-def generate_random_key(length=32):
-    """
-    Returns a 'length' byte character composed of random letters and
-    unsigned single digit integers. Used to create a random
-    signature key to identify pre-configured backup directories
-    for the rsync plugin
-    """
-    from string import letters, digits
-    from random import choice
-    return ''.join([choice(letters + digits) \
-              for i in range(length)])
-
-def main(argv):
-    rbacp = RBACprofile()
-    # The setup GUI needs to be run as root in order to ensure
-    # that the rsync backup target directory is accessible by
-    # root and to perform validation checks on it.
-    # This GUI can be launched with an euid of root in one of
-    # the following 2 ways:
-    # 0. Run by the superuser (root)
-    # 1. Run via gksu to allow a non-privileged user to authenticate
-    #    as the superuser (root)
-
-    if os.geteuid() == 0:
-        manager = SetupManager(argv)
-        gtk.gdk.threads_enter()
-        gtk.main()
-        gtk.gdk.threads_leave()
-    elif os.path.exists(argv) and os.path.exists("/usr/bin/gksu"):
-        # Run via gksu, which will prompt for the root password
-        os.unsetenv("DBUS_SESSION_BUS_ADDRESS")
-        os.execl("/usr/bin/gksu", "gksu", argv)
-        # Shouldn't reach this point
-        sys.exit(1)
-    else:
-        dialog = gtk.MessageDialog(None,
-                                   0,
-                                   gtk.MESSAGE_ERROR,
-                                   gtk.BUTTONS_CLOSE,
-                                   _("Insufficient Priviliges"))
-        dialog.format_secondary_text(_("The snapshot manager service requires "
-                                       "administrative privileges to run. "
-                                       "You have not been assigned the necessary"
-                                       "administrative priviliges."
-                                       "\n\nConsult your system administrator "))
-        dialog.set_icon_name("time-slider-setup")
-        dialog.run()
-        sys.exit(1)
-
diff --git a/usr/share/time-slider/lib/time_slider/snapnowui.py~ b/usr/share/time-slider/lib/time_slider/snapnowui.py~
deleted file mode 100755 (executable)
index 9e6c7c4..0000000
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/python2.6
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-import sys
-import os
-import datetime
-import getopt
-import string
-
-try:
-    import pygtk
-    pygtk.require("2.4")
-except:
-    pass
-try:
-    import gtk
-    import gtk.glade
-    gtk.gdk.threads_init()
-except:
-    sys.exit(1)
-try:
-    import glib
-    import gobject
-except:
-    sys.exit(1)
-
-# Here we define the path constants so that other modules can use them.
-# This allows us to access the shared files without having to know the
-# actual location; we just use the location of the current file and
-# paths relative to that.
-SHARED_FILES = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                               os.path.pardir,
-                               os.path.pardir))
-LOCALE_PATH = os.path.join('/usr', 'share', 'locale')
-RESOURCE_PATH = os.path.join(SHARED_FILES, 'res')
-
-# The name of the gettext domain. Because our translation files are not
-# in a global folder this doesn't really matter; setting it to the
-# application name is a good idea though.
-GETTEXT_DOMAIN = 'time-slider'
-
-# set up the glade gettext system and locales
-gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
-gtk.glade.textdomain(GETTEXT_DOMAIN)
-
-import zfs
-from rbac import RBACprofile
-
-class SnapshotNowDialog:
-
-    def __init__(self, dir_path, zfs_fs):
-        self.dir_path = dir_path
-        self.zfs_fs = zfs_fs
-        self.xml = gtk.glade.XML("%s/../../glade/time-slider-snapshot.glade" \
-                                  % (os.path.dirname(__file__)))
-        self.dialog = self.xml.get_widget("dialog")
-        self.dir_label = self.xml.get_widget("dir_label")
-        self.snap_name_entry = self.xml.get_widget("snapshot_name_entry")
-        # signal dictionary
-        dic = {"on_closebutton_clicked" : gtk.main_quit,
-               "on_window_delete_event" : gtk.main_quit,
-               "on_cancel_clicked" : gtk.main_quit,
-               "on_ok_clicked" : self.__on_ok_clicked}
-        self.xml.signal_autoconnect(dic)
-
-        self.snap_name_entry.connect("activate", self.__on_entry_activate, 0)
-
-        self.dir_label.set_text(self.dir_path)
-        self.snap_name_entry.set_text("my-snapshot-%s" %
-            datetime.datetime.now().strftime("%Y-%m-%d_%Hh%M:%S"))
-
-        self.dialog.show()
-
-    def validate_name(self, name, showErrorDialog=False):
-        # Check name validity. Allowed characters are taken from:
-        # http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/common/zfs/zfs_namecheck.c#dataset_namecheck
-        # http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/common/zfs/zfs_namecheck.c#valid_char
-        invalid = False
-        _validchars = string.ascii_letters + string.digits + "-_.:"
-
-        valid_name = ""
-        for c in name:
-            if c in _validchars:
-                valid_name = valid_name + c
-            else:
-                invalid = True
-
-        if invalid and showErrorDialog:
-            dialog = gtk.MessageDialog(None,
-                                       0,
-                                       gtk.MESSAGE_ERROR,
-                                       gtk.BUTTONS_CLOSE,
-                                       _("Invalid characters in snapshot name"))
-            dialog.set_title(_("Error"))
-            dialog.format_secondary_text(_("Allowed characters for snapshot names are:\n"
-                                           "[a-z][A-Z][0-9][-_.:]\n"
-                                           "All invalid characters will be removed\n"))
-            dialog.run()
-            dialog.destroy()
-        return valid_name
-
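-    # Illustrative example: validate_name("my snap!") drops the space and
-    # the "!", returning "mysnap"; only letters, digits and "-_.:" survive.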
-
-    def __on_entry_activate(self, widget, none):
-        self.snap_name_entry.set_text(self.validate_name(self.snap_name_entry.get_text(), True))
-        return
-
-
-    def __on_ok_clicked(self, widget):
-        name = self.snap_name_entry.get_text()
-        valid_name = self.validate_name(name, True)
-        if name == valid_name:
-            cmd = "pfexec /usr/sbin/zfs snapshot %s@%s" % (self.zfs_fs, valid_name)
-            fin, fout, ferr = os.popen3(cmd)
-            # Check for any error output generated and
-            # return it to caller if so.
-            error = ferr.read()
-            self.dialog.hide()
-            if len(error) > 0:
-                dialog = gtk.MessageDialog(None,
-                                           0,
-                                           gtk.MESSAGE_ERROR,
-                                           gtk.BUTTONS_CLOSE,
-                                           _("Error occurred while creating the snapshot"))
-                dialog.set_title(_("Error"))
-                dialog.format_secondary_text(error)
-                dialog.run()
-            else:
-                dialog = gtk.MessageDialog(None,
-                                           0,
-                                           gtk.MESSAGE_INFO,
-                                           gtk.BUTTONS_CLOSE,
-                                           _("Snapshot created successfully"))
-                dialog.set_title(_("Success"))
-                dialog.format_secondary_text(_("A snapshot of zfs filesystem %(zfs_fs)s\n"
-                                               "named %(valid_name)s\n"
-                                               "has been created.\n") %
-                                             {"zfs_fs": self.zfs_fs, "valid_name": valid_name})
-                dialog.run()
-            sys.exit(1)
-        else:
-            self.snap_name_entry.set_text(valid_name)
-
-def main(argv):
-    try:
-        opts,args = getopt.getopt(sys.argv[1:], "", [])
-    except getopt.GetoptError:
-        sys.exit(2)
-    # FIXME
-    # Check for 2 args. Here we assume the arguments are correct.
-    if len(args) != 2:
-        dialog = gtk.MessageDialog(None,
-                                   0,
-                                   gtk.MESSAGE_ERROR,
-                                   gtk.BUTTONS_CLOSE,
-                                   _("Invalid argument count."))
-        dialog.set_title(_("Error"))
-        dialog.format_secondary_text(_("Snapshot Now requires"
-                                       " 2 arguments:\n- The path of the "
-                                       "directory to be snapshotted.\n"
-                                       "- The zfs filesystem corresponding "
-                                       "to this directory."))
-        dialog.run()
-        sys.exit(2)
-
-    rbacp = RBACprofile()
-    # The user security attributes checked are the following:
-    # 1. The "Primary Administrator" role
-    # 2. The "ZFS File System Management" profile.
-    #
-    # Valid combinations of the above are:
-    # - 1 or 2
-    # Note that an effective UID=0 will match any profile search so
-    # no need to check it explicitly.
-    if rbacp.has_profile("ZFS File System Management"):
-        manager = SnapshotNowDialog(args[0], args[1])
-        gtk.main()
-    elif os.path.exists(argv) and os.path.exists("/usr/bin/gksu"):
-        # Run via gksu, which will prompt for the root password
-        newargs = ["gksu", argv]
-        for arg in args:
-            newargs.append(arg)
-        os.execv("/usr/bin/gksu", newargs)
-        # Shouldn't reach this point
-        sys.exit(1)
-    else:
-        dialog = gtk.MessageDialog(None,
-                                   0,
-                                   gtk.MESSAGE_ERROR,
-                                   gtk.BUTTONS_CLOSE,
-                                   _("Insufficient Privileges"))
-        dialog.set_title(_("Error"))
-        dialog.format_secondary_text(_("Snapshot Now requires "
-                                       "administrative privileges to run. "
-                                       "You have not been assigned the necessary "
-                                       "administrative privileges."
-                                       "\n\nConsult your system administrator."))
-        dialog.run()
-        print argv + " is not a valid executable path"
-        sys.exit(1)
-
diff --git a/usr/share/time-slider/lib/time_slider/timesliderd.py~ b/usr/share/time-slider/lib/time_slider/timesliderd.py~
deleted file mode 100755 (executable)
index eb2e341..0000000
+++ /dev/null
@@ -1,977 +0,0 @@
-#!/usr/bin/python2.6
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-import sys
-import os
-import subprocess
-import re
-import threading
-import getopt
-import syslog
-import time
-import datetime
-import calendar
-import signal
-
-import glib
-import gobject
-import dbus
-import dbus.service
-import dbus.mainloop
-import dbus.mainloop.glib
-
-import dbussvc
-import zfs
-import smf
-import timeslidersmf
-import autosnapsmf
-import plugin
-from rbac import RBACprofile
-import util
-
-_MINUTE = 60
-_HOUR = _MINUTE * 60
-_DAY = _HOUR * 24
-_WEEK = _DAY * 7
-
-
-# Status codes for actual zpool capacity levels.
-# These are relative to the SMF property defined
-# levels for: warning, critical and emergency levels
-STATUS_OK = 0 # Below user specified threshold. Everything is OK
-STATUS_WARNING = 1 # Above specified user threshold level
-STATUS_CRITICAL = 2 # Above specified critical threshold level
-STATUS_EMERGENCY = 3 # Above specified emergency threshold level
-
-intervals = {"weeks" : _WEEK, "days" : _DAY, "hours" : _HOUR, "minutes" : _MINUTE}
-
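-# For example, a schedule with interval "hours" and period 4 is due
-# every intervals["hours"] * 4 == 14400 seconds after its last snapshot;
-# see _update_schedules() below.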
-
-class SnapshotManager(threading.Thread):
-
-    def __init__(self, bus):
-        # Used to wake up the run() method prematurely in the event
-        # of a SIGHUP/SMF refresh
-        self._conditionLock = threading.Condition(threading.RLock())
-        # Used when schedules are being rebuilt or examined.
-        self._refreshLock = threading.Lock()
-        # Indicates that cleanup is in progress when locked
-        self._cleanupLock = threading.Lock()
-        self._datasets = zfs.Datasets()
-        # Indicates that schedules need to be rebuilt from scratch
-        self._stale = True
-        self._lastCleanupCheck = 0
-        self._zpools = []
-        self._poolstatus = {}
-        self._destroyedsnaps = []
-
-        # This is also checked during the refresh() method but we need
-        # to know it sooner for instantiation of the PluginManager
-        self._smf = timeslidersmf.TimeSliderSMF()
-        try:
-            self.verbose = self._smf.get_verbose()
-        except RuntimeError,message:
-            sys.stderr.write("Error determing whether debugging is enabled\n")
-            self.verbose = False
-
-        self._dbus = dbussvc.AutoSnap(bus,
-                                      '/org/opensolaris/TimeSlider/autosnap',
-                                      self)
-
-        self._plugin = plugin.PluginManager(self.verbose)
-        self.exitCode = smf.SMF_EXIT_OK
-        self.refresh()
-
-        # Seems we're up and running OK. 
-        # Signal our parent so we can daemonise
-        os.kill(os.getppid(), signal.SIGUSR1)
-
-        # SMF/svc.startd sends SIGHUP to force
-        # a refresh of the daemon
-        signal.signal(signal.SIGHUP, self._signalled)
-
-        # Init done. Now initialise threading.
-        threading.Thread.__init__ (self)
-        self.setDaemon(True)
-
-    def run(self):
-        # Deselect swap and dump volumes so they don't get snapshotted.
-        for vol in self._datasets.list_volumes():
-            name = vol.rsplit("/")
-            try:
-                if (name[1] == "swap" or name[1] == "dump"):
-                    util.debug("Auto excluding %s volume" % vol, self.verbose)
-                    volume = zfs.Volume(vol)
-                    volume.set_auto_snap(False)
-            except IndexError:
-                pass
-            
-        nexttime = None
-        waittime = None
-        while True:
-            try:
-                self.refresh()
-                # First check and, if necessary, perform any remedial cleanup.
-                # This is best done before creating any new snapshots which may
-                # otherwise get immediately gobbled up by the remedial cleanup.
-                if self._needs_cleanup() == True:
-                    self._perform_cleanup()
-                    # Check to see if cleanup actually deleted anything before
-                    # notifying the user. Avoids the popup appearing continuously
-                    if len(self._destroyedsnaps) > 0:
-                        self._send_notification()
-                    self._send_to_syslog()
-
-                nexttime = self._check_snapshots()
-                # Overdue snapshots are already taken automatically
-                # inside _check_snapshots() so nexttime should never be
-                # < 0. It can be None however, which is fine since it 
-                # will cause the scheduler thread to sleep indefinitely
-                # or until a SIGHUP is caught.
-                if nexttime:
-                    util.debug("Waiting until " + str (nexttime), self.verbose)
-                waittime = None
-                if nexttime != None:
-                    waittime = nexttime - long(time.time())
-                    if (waittime <= 0):
-                        # We took too long and missed a snapshot, so break out
-                        # and catch up on it the next time through the loop
-                        continue
-                # waittime could be None if no auto-snap schedules are online
-                self._conditionLock.acquire()
-                if waittime:
-                    util.debug("Waiting %d seconds" % (waittime), self.verbose)
-                    self._conditionLock.wait(waittime)
-                else: #None. Just wait a while to check for cleanups.
-                    util.debug("No auto-snapshot schedules online.", \
-                               self.verbose)
-                    self._conditionLock.wait(_MINUTE * 15)
-
-            except OSError, message:
-                sys.stderr.write("Caught OSError exception in snapshot" +
-                                 " manager thread\n")
-                sys.stderr.write("Error details:\n" + \
-                                 "--------BEGIN ERROR MESSAGE--------\n" + \
-                                 str(message) + \
-                                 "\n--------END ERROR MESSAGE--------\n")
-                self.exitCode = smf.SMF_EXIT_ERR_FATAL
-                # Exit this thread
-                break
-            except RuntimeError,message:
-                sys.stderr.write("Caught RuntimeError exception in snapshot" +
-                                 " manager thread\n")
-                sys.stderr.write("Error details:\n" + \
-                                 "--------BEGIN ERROR MESSAGE--------\n" + \
-                                 str(message) + \
-                                 "\n--------END ERROR MESSAGE--------\n")
-                # Exit this thread
-                break
-
-    def _signalled(self, signum, frame):
-        if signum == signal.SIGHUP:
-            if self._refreshLock.acquire(False) == False:
-                return
-            self._stale = True
-            self._refreshLock.release()
-            self._conditionLock.acquire()
-            self._conditionLock.notify()
-            self._conditionLock.release()
-
-    def refresh(self):
-        """
-        Checks if defined snapshot schedules are out
-        of date and rebuilds and updates if necessary
-        """
-        self._refreshLock.acquire()
-        if self._stale == True:
-            self._configure_svc_props()
-            self._rebuild_schedules()
-            self._update_schedules()
-            self._plugin.refresh()
-            self._stale = False
-        self._refreshLock.release()
-
-    def _configure_svc_props(self):
-        try:
-            self.verbose = self._smf.get_verbose()
-        except RuntimeError,message:
-            sys.stderr.write("Error determing whether debugging is enabled\n")
-            self.verbose = False
-
-        try:
-            cleanup = self._smf.get_remedial_cleanup()
-            warn = self._smf.get_cleanup_level("warning")
-            util.debug("Warning level value is:   %d%%" % warn, self.verbose)
-            crit = self._smf.get_cleanup_level("critical")
-            util.debug("Critical level value is:  %d%%" % crit, self.verbose)
-            emer = self._smf.get_cleanup_level("emergency")
-            util.debug("Emergency level value is: %d%%" % emer, self.verbose)
-        except RuntimeError,message:
-            sys.stderr.write("Failed to determine cleanup threshhold levels\n")
-            sys.stderr.write("Details:\n" + \
-                             "--------BEGIN ERROR MESSAGE--------\n" + \
-                             str(message) + \
-                             "\n---------END ERROR MESSAGE---------\n")
-            sys.stderr.write("Using factory defaults of 80%, 90% and 95%\n")
-            #Go with defaults
-            #FIXME - this would be an appropriate case to mark svc as degraded
-            self._remedialCleanup = True
-            self._warningLevel = 80
-            self._criticalLevel = 90
-            self._emergencyLevel = 95
-        else:
-            self._remedialCleanup = cleanup
-            self._warningLevel = warn
-            self._criticalLevel = crit
-            self._emergencyLevel = emer
-
-        try:
-            self._keepEmpties = self._smf.get_keep_empties()
-        except RuntimeError,message:
-            # Not fatal, just assume we delete them (default configuration)
-            sys.stderr.write("Can't determine whether to keep empty snapshots\n")
-            sys.stderr.write("Details:\n" + \
-                             "--------BEGIN ERROR MESSAGE--------\n" + \
-                             str(message) + \
-                             "\n---------END ERROR MESSAGE---------\n")
-            sys.stderr.write("Assuming default value: False\n")
-            self._keepEmpties = False
-
-        # Previously, snapshot labels used the ":" character as a
-        # separator character for datestamps. Windows filesystems such as
-        # CIFS and FAT choke on this character so now we use a user definable
-        # separator value, with a default value of "_".
-        # We need to check for both the old and new format when looking for
-        # snapshots.
-        self._separator = self._smf.get_separator()
-        self._prefix = "%s[:%s]" \
-            % (autosnapsmf.SNAPLABELPREFIX, self._separator)
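-        # e.g. assuming the usual "zfs-auto-snap" label prefix, this gives
-        # the pattern "zfs-auto-snap[:_]", matching both old and new label
-        # styles when listing snapshots.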
-
-        # Rebuild pool list
-        self._zpools = []
-        try:
-            for poolname in zfs.list_zpools():
-                # Do not try to examine FAULTED pools
-                zpool = zfs.ZPool(poolname)
-                if zpool.health == "FAULTED":
-                    util.debug("Ignoring faulted Zpool: %s\n" \
-                               % (zpool.name), \
-                               self.verbose)
-                else:
-                    self._zpools.append(zpool)
-                util.debug(str(zpool), self.verbose)
-        except RuntimeError,message:
-            sys.stderr.write("Could not list Zpools\n")
-            self.exitCode = smf.SMF_EXIT_ERR_FATAL
-            # Propagate exception up to thread's run() method
-            raise RuntimeError,message
-
-
-    def _rebuild_schedules(self):
-        """
-        Builds 2 lists of default and custom auto-snapshot SMF instances
-        """
-
-        self._last = {}
-        self._next = {}
-        self._keep = {}
-
-        try:
-            _defaultSchedules = autosnapsmf.get_default_schedules()
-            _customSchedules = autosnapsmf.get_custom_schedules()
-        except RuntimeError,message:
-            self.exitCode = smf.SMF_EXIT_ERR_FATAL
-            raise RuntimeError, "Error reading SMF schedule instances\n" + \
-                                "Details:\n" + str(message)
-        else:
-            # Now set it in stone.
-            self._defaultSchedules = tuple(_defaultSchedules)
-            self._customSchedules = tuple(_customSchedules)
-            
-            # Build the combined schedule tuple from default + custom schedules
-            _defaultSchedules.extend(_customSchedules)
-            self._allSchedules = tuple(_defaultSchedules)
-            for schedule,i,p,keep in self._allSchedules:
-                self._last[schedule] = 0
-                self._next[schedule] = 0
-                self._keep[schedule] = keep
-
-    def _update_schedules(self):
-        interval = 0
-        idx = 1 # Used to index subsets for schedule overlap calculation
-        last = None
-
-        for schedule,interval,period,keep in self._allSchedules:
-            # Shortcut if we've already processed this schedule and it's 
-            # still up to date. Don't skip the default schedules though
-            # because overlap affects their scheduling
-            if [schedule,interval,period,keep] not in \
-                self._defaultSchedules and \
-                (self._next[schedule] > self._last[schedule]):
-                util.debug("Short circuiting %s recalculation" \
-                           % (schedule), \
-                           self.verbose)
-                continue
-
-            # If we don't have an internal timestamp for the given schedule
-            # ask zfs for the last snapshot and get its creation timestamp.
-            if self._last[schedule] == 0:
-                try:
-                    snaps = self._datasets.list_snapshots("%s%s" % \
-                                                         (self._prefix,
-                                                          schedule))
-                except RuntimeError,message:
-                    self.exitCode = smf.SMF_EXIT_ERR_FATAL
-                    sys.stderr.write("Failed to list snapshots during schedule update\n")
-                    # Propagate up to the thread's run() method
-                    raise RuntimeError,message
-
-                if len(snaps) > 0:
-                    util.debug("Last %s snapshot was: %s" % \
-                               (schedule, snaps[-1][0]), \
-                               self.verbose)
-                    self._last[schedule] = snaps[-1][1]
-
-            last = self._last[schedule]
-            if interval != "months": # months is non-constant. See below.
-                util.debug("Recalculating %s schedule" % (schedule), \
-                           self.verbose)
-                try:
-                    totalinterval = intervals[interval] * period
-                except KeyError:
-                    self.exitCode = smf.SMF_EXIT_ERR_CONFIG
-                    sys.stderr.write(schedule + \
-                                      " schedule has invalid interval: " + \
-                                      "'%s\'\n" % interval)
-                    # Propagate up to thread's run() method
-                    raise RuntimeError
-                if [schedule,interval,period,keep] in self._defaultSchedules:
-                    # This is one of the default schedules so check for an
-                    # overlap with one of the dominant schedules.
-                    for s,i,p,k in self._defaultSchedules[:idx]:
-                        last = max(last, self._last[s])
-                    idx += 1
-
-            else: # interval == "months"
-                if self._next[schedule] > last:
-                    util.debug("Short circuiting " + \
-                               schedule + \
-                               " recalculation", \
-                               self.verbose)
-                    continue
-                util.debug("Recalculating %s schedule" % (schedule), \
-                           self.verbose)
-                snap_tm = time.gmtime(self._last[schedule])
-                # Increment year if period is >= 1 calendar year.
-                year = snap_tm.tm_year
-                year += period / 12
-                period = period % 12
-
-                mon = (snap_tm.tm_mon + period) % 12
-                # A result of 0 actually means December.
-                if mon == 0:
-                    mon = 12
-                # Account for period that spans calendar year boundary.
-                elif snap_tm.tm_mon + period > 12:
-                    year += 1
-
-                d,dlastmon = calendar.monthrange(snap_tm.tm_year, snap_tm.tm_mon)
-                d,dnewmon = calendar.monthrange(year, mon)
-                mday = snap_tm.tm_mday
-                if dlastmon > dnewmon and snap_tm.tm_mday > dnewmon:
-                    mday = dnewmon
-                
-                tm = (year, mon, mday, \
-                    snap_tm.tm_hour, snap_tm.tm_min, snap_tm.tm_sec, \
-                    0, 0, -1)
-                newt = calendar.timegm(tm)
-                new_tm = time.gmtime(newt)
-                totalinterval = newt - self._last[schedule]
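-                # Worked example (illustrative): a snapshot taken on
-                # 2009-01-31 with a 1 month period gives mon=2 and
-                # dnewmon=28, so mday is clamped to 28 and the next
-                # snapshot is due on 2009-02-28.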
-
-            self._next[schedule] = last + totalinterval
-
-    def _next_due(self):
-        schedule = None
-        earliest = None
-        now = long(time.time())
-        
-        for s,i,p,k in self._defaultSchedules:
-            due = self._next[s]
-            if due <= now:
-                # Default schedule - so break out at the first
-                # schedule that is overdue. The subordinate schedules
-                # will re-adjust afterwards.
-                earliest,schedule = due,s
-                break
-            elif earliest != None:
-                if due < earliest:
-                    earliest,schedule = due,s
-            else: #FIXME better optimisation with above condition
-                earliest,schedule = due,s
-        for s,i,p,k in self._customSchedules:
-            due = self._next[s]
-            if earliest != None:
-                if due < earliest:
-                    earliest,schedule = due,s
-            else: #FIXME better optimisation with above condition
-                earliest,schedule = due,s
-        return earliest,schedule
-
-    def _check_snapshots(self):
-        """
-        Check the schedules and see what the required snapshot is.
-        Take one immediately on the first overdue snapshot required
-        """
-        # Make sure a refresh() doesn't mess with the schedule while
-        # we're reading through it.
-        self._refreshLock.acquire()
-        next,schedule = self._next_due()
-        self._refreshLock.release()
-        now = long(time.time())
-        while next != None and next <= now:
-            label = self._take_snapshots(schedule)
-            self._plugin.execute_plugins(schedule, label)
-            self._refreshLock.acquire()
-            self._update_schedules()
-            next,schedule = self._next_due()
-            self._refreshLock.release()
-            dt = datetime.datetime.fromtimestamp(next)
-            util.debug("Next snapshot is %s due at: %s" % \
-                       (schedule, dt.isoformat()), \
-                       self.verbose)
-        return next
-                    
-    def _take_snapshots(self, schedule):
-        # Set the time before taking snapshot to avoid clock skew due
-        # to time taken to complete snapshot.
-        tm = long(time.time())
-        label = "%s%s%s-%s" % \
-                (autosnapsmf.SNAPLABELPREFIX, self._separator, schedule,
-                 datetime.datetime.now().strftime("%Y-%m-%d-%Hh%M"))
-        try:
-            self._datasets.create_auto_snapshot_set(label, tag=schedule)
-        except RuntimeError, message:
-            # Write an error message, set the exit code and pass it up the
-            # stack so the thread can terminate
-            sys.stderr.write("Failed to create snapshots for schedule: %s\n" \
-                             % (schedule))
-            self.exitCode = smf.SMF_EXIT_MON_DEGRADE
-            raise RuntimeError,message
-        self._last[schedule] = tm
-        self._perform_purge(schedule)
-        return label
-
-    def _prune_snapshots(self, dataset, schedule):
-        """Cleans out zero sized snapshots, kind of cautiously"""
-            # Per schedule: We want to delete 0 sized
-            # snapshots but we need to keep at least one around (the most
-            # recent one) for each schedule so that that overlap is 
-            # maintained from frequent -> hourly -> daily etc.
-            # Start off with the smallest interval schedule first and
-            # move up. This increases the amount of data retained where
-            # several snapshots are taken together like a frequent hourly
-            # and daily snapshot taken at 12:00am. If 3 snapshots are all
-            # identical and reference the same identical data they will all
-            # be initially reported as zero for used size. Deleting the
-            # daily first then the hourly would shift make the data referenced
-            # by all 3 snapshots unique to the frequent scheduled snapshot.
-            # This snapshot would probably be purged within an how ever and the
-            # data referenced by it would be gone for good.
-            # Doing it the other way however ensures that the data should
-            # remain accessible to the user for at least a week as long as
-            # the pool doesn't run low on available space before that.
-
-        try:
-            snaps = dataset.list_snapshots("%s%s" % (self._prefix,schedule))
-            # Clone the list because we want to remove items from it
-            # while iterating through it.
-            remainingsnaps = snaps[:]
-        except RuntimeError,message:
-            sys.stderr.write("Failed to list snapshots during snapshot cleanup\n")
-            self.exitCode = smf.SMF_EXIT_ERR_FATAL
-            raise RuntimeError,message
-
-        if (self._keepEmpties == False):
-            try: # remove the newest one from the list.
-                snaps.pop()
-            except IndexError:
-                pass
-            for snapname in snaps:
-                try:
-                    snapshot = zfs.Snapshot(snapname)
-                except Exception,message:
-                    sys.stderr.write(str(message))
-                    # Not fatal, just skip to the next snapshot
-                    continue
-
-                try:
-                    if snapshot.get_used_size() == 0:
-                        util.debug("Destroying zero sized: " + snapname, \
-                                   self.verbose)
-                        try:
-                            snapshot.destroy()
-                        except RuntimeError,message:
-                            sys.stderr.write("Failed to destroy snapshot: " +
-                                             snapname + "\n")
-                            self.exitCode = smf.SMF_EXIT_MON_DEGRADE
-                            # Propagate exception so thread can exit
-                            raise RuntimeError,message
-                        remainingsnaps.remove(snapname)
-                except RuntimeError,message:
-                    sys.stderr.write("Can not determine used size of: " + \
-                                     snapname + "\n")
-                    self.exitCode = smf.SMF_EXIT_MON_DEGRADE
-                    # Propagate the exception to the thread's run() method
-                    raise RuntimeError,message
-
-        # Deleting individual snapshots instead of recursive sets
-        # breaks the recursion chain and leaves child snapshots
-        # dangling so we need to take care of cleaning up the 
-        # snapshots.
-        target = len(remainingsnaps) - self._keep[schedule]
-        counter = 0
-        while counter < target:
-            util.debug("Destroy expired snapshot: " + \
-                       remainingsnaps[counter], 
-                       self.verbose)
-            try:
-                snapshot = zfs.Snapshot(remainingsnaps[counter])
-            except Exception,message:
-                sys.stderr.write(str(message))
-                # Not fatal, just skip to the next snapshot
-                counter += 1
-                continue
-            try:
-                snapshot.destroy()
-            except RuntimeError,message:
-                sys.stderr.write("Failed to destroy snapshot: " +
-                                 snapshot.name + "\n")
-                self.exitCode = smf.SMF_EXIT_ERR_FATAL
-                # Propagate exception so thread can exit
-                raise RuntimeError,message
-            else:
-                counter += 1
-
-    def _perform_purge(self, schedule):
-        """Cautiously cleans out zero sized snapshots"""
-        # We need to avoid accidentally pruning auto snapshots received
-        # from one zpool to another. We ensure this by examining only
-        # snapshots whose parent filesystems and volumes are explicitly
-        # tagged to be snapshotted.
-        try:
-            for name in self._datasets.list_auto_snapshot_sets(schedule):
-                dataset = zfs.ReadWritableDataset(name)
-                self._prune_snapshots(dataset, schedule)
-        except RuntimeError,message:
-            sys.stderr.write("Error listing datasets during " + \
-                             "removal of expired snapshots\n")
-            self.exitCode = smf.SMF_EXIT_ERR_FATAL
-            # Propagate up to thread's run() method
-            raise RuntimeError,message
-
-    def _needs_cleanup(self):
-        if self._remedialCleanup == False:
-            # Sys admin has explicitly instructed for remedial cleanups
-            # not to be performed.
-            return False
-        now = long(time.time())
-        # Don't run checks any less than 15 minutes apart.
-        if self._cleanupLock.acquire(False) == False:
-            #Indicates that a cleanup is already running.
-            return False
-        # FIXME - Make the cleanup interval equal to the minimum snapshot interval
-        # if custom snapshot schedules are defined and enabled.
-        elif ((now - self._lastCleanupCheck) < (_MINUTE * 15)):
-            pass
-        else:
-            for zpool in self._zpools:
-                try:
-                    if zpool.get_capacity() > self._warningLevel:
-                        # Before getting into a panic, determine if the pool
-                        # is one we actually take snapshots on, by checking
-                        # for one of the "auto-snapshot:<schedule>" tags. Not
-                        # super fast, but it only happens under exceptional
-                        # circumstances of a zpool nearing its capacity.
-
-                        for sched in self._allSchedules:
-                            sets = zpool.list_auto_snapshot_sets(sched[0])
-                            if len(sets) > 0:
-                                util.debug("%s needs a cleanup" \
-                                           % zpool.name, \
-                                           self.verbose)
-                                self._cleanupLock.release()
-                                return True
-                except RuntimeError, message:
-                    sys.stderr.write("Error checking zpool capacity of: " + \
-                                     zpool.name + "\n")
-                    self._cleanupLock.release()
-                    self.exitCode = smf.SMF_EXIT_ERR_FATAL
-                    # Propagate up to thread's run() method.
-                    raise RuntimeError,message
-            self._lastCleanupCheck = long(time.time())
-        self._cleanupLock.release()
-        return False
-
-    def _perform_cleanup(self):
-        if self._cleanupLock.acquire(False) == False:
-            # Cleanup already running. Skip
-            return
-        self._destroyedsnaps = []
-        for zpool in self._zpools:
-            try:
-                self._poolstatus[zpool.name] = 0
-                capacity = zpool.get_capacity()
-                if capacity > self._warningLevel:
-                    self._run_warning_cleanup(zpool)
-                    self._poolstatus[zpool.name] = 1
-                    capacity = zpool.get_capacity()
-                if capacity > self._criticalLevel:
-                    self._run_critical_cleanup(zpool)
-                    self._poolstatus[zpool.name] = 2
-                    capacity = zpool.get_capacity()
-                if capacity > self._emergencyLevel:
-                    self._run_emergency_cleanup(zpool)
-                    self._poolstatus[zpool.name] = 3
-                    capacity = zpool.get_capacity()
-                if capacity > self._emergencyLevel:
-                    self._run_emergency_cleanup(zpool)
-                    self._poolstatus[zpool.name] = 4
-            # This also catches exceptions thrown from _run_<level>_cleanup()
-            # and _run_cleanup() in methods called by _perform_cleanup()
-            except RuntimeError,message:
-                sys.stderr.write("Remedial space cleanup failed because " + \
-                                 "of failure to determinecapacity of: " + \
-                                 zpool.name + "\n")
-                self.exitCode = smf.SMF_EXIT_ERR_FATAL
-                self._cleanupLock.release()
-                # Propagate up to thread's run() method.
-                raise RuntimeError,message
-
-            # Bad - there are no more snapshots left and nothing
-            # left to delete. We don't disable the service since
-            # it will permit self recovery and snapshot
-            # retention when space becomes available on
-            # the pool (hopefully).
-            util.debug("%s pool status after cleanup:" \
-                       % zpool.name, \
-                       self.verbose)
-            util.debug(zpool, self.verbose)
-        util.debug("Cleanup completed. %d snapshots were destroyed" \
-                   % len(self._destroyedsnaps), \
-                   self.verbose)
-        # Avoid needless list iteration for non-debug mode
-        if self.verbose == True and len(self._destroyedsnaps) > 0:
-            for snap in self._destroyedsnaps:
-                sys.stderr.write("\t%s\n" % snap)
-        self._cleanupLock.release()
-
-    def _run_warning_cleanup(self, zpool):
-        util.debug("Performing warning level cleanup on %s" % \
-                   zpool.name, \
-                   self.verbose)
-        self._run_cleanup(zpool, "daily", self._warningLevel)
-        if zpool.get_capacity() > self._warningLevel:
-            self._run_cleanup(zpool, "hourly", self._warningLevel)
-
-    def _run_critical_cleanup(self, zpool):
-        util.debug("Performing critical level cleanup on %s" % \
-                   zpool.name, \
-                   self.verbose)
-        self._run_cleanup(zpool, "weekly", self._criticalLevel)
-        if zpool.get_capacity() > self._criticalLevel:
-            self._run_cleanup(zpool, "daily", self._criticalLevel)
-        if zpool.get_capacity() > self._criticalLevel:
-            self._run_cleanup(zpool, "hourly", self._criticalLevel)
-
-    def _run_emergency_cleanup(self, zpool):
-        util.debug("Performing emergency level cleanup on %s" % \
-                   zpool.name, \
-                   self.verbose)
-        self._run_cleanup(zpool, "monthly", self._emergencyLevel)
-        if zpool.get_capacity() > self._emergencyLevel:
-            self._run_cleanup(zpool, "weekly", self._emergencyLevel)
-        if zpool.get_capacity() > self._emergencyLevel:
-            self._run_cleanup(zpool, "daily", self._emergencyLevel)
-        if zpool.get_capacity() > self._emergencyLevel:
-            self._run_cleanup(zpool, "hourly", self._emergencyLevel)
-        if zpool.get_capacity() > self._emergencyLevel:
-            self._run_cleanup(zpool, "frequent", self._emergencyLevel)
-        # Finally, as a last resort, delete custom scheduled snapshots
-        for schedule,i,p,k in self._customSchedules:
-            if zpool.get_capacity() < self._emergencyLevel:
-                break
-            else:
-                self._run_cleanup(zpool, schedule, self._emergencyLevel)
-
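-    # Illustrative flow: at 96% capacity with an emergency level of 95%,
-    # monthly snapshots go first, then weekly, daily, hourly, frequent,
-    # and finally custom schedules, stopping once the pool drops back
-    # below the emergency level.
-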
-    def _run_cleanup(self, zpool, schedule, threshold):
-        clonedsnaps = []
-        snapshots = []
-        try:
-            clonedsnaps = self._datasets.list_cloned_snapshots()
-        except RuntimeError,message:
-            sys.stderr.write("Error (non-fatal) listing cloned snapshots" +
-                             " while recovering pool capacity\n")
-            sys.stderr.write("Error details:\n" + \
-                             "--------BEGIN ERROR MESSAGE--------\n" + \
-                             str(message) + \
-                             "\n--------END ERROR MESSAGE--------\n")
-
-        # Build a list of snapshots in the given schedule, that are not
-        # cloned, and sort the result in reverse chronological order.
-        try:
-            snapshots = [s for s,t in \
-                            zpool.list_snapshots("%s%s" \
-                            % (self._prefix,schedule)) \
-                            if not s in clonedsnaps]
-            snapshots.reverse()
-        except RuntimeError,message:
-            sys.stderr.write("Error listing snapshots" +
-                             " while recovering pool capacity\n")
-            self.exitCode = smf.SMF_EXIT_ERR_FATAL
-            # Propagate the error up to the thread's run() method.
-            raise RuntimeError,message
-   
-        while zpool.get_capacity() > threshold:
-            if len(snapshots) == 0:
-                syslog.syslog(syslog.LOG_NOTICE,
-                              "No more %s snapshots left" \
-                               % schedule)
-                return
-
-            """This is not an exact science. Deleteing a zero sized 
-            snapshot can have unpredictable results. For example a
-            pair of snapshots may share exclusive reference to a large
-            amount of data (eg. a large core file). The usage of both
-            snapshots will initially be seen to be 0 by zfs(1). Deleting
-            one of the snapshots will make the data become unique to the
-            single remaining snapshot that references it uniquely. The
-            remaining snapshot's size will then show up as non zero. So
-            deleting 0 sized snapshot is not as pointless as it might seem.
-            It also means we have to loop through this, each snapshot set
-            at a time and observe the before and after results. Perhaps
-            better way exists...."""
-
-            # Start with the oldest first
-            snapname = snapshots.pop()
-            snapshot = zfs.Snapshot(snapname)
-            # It would be nicer, for performance purposes, to delete sets
-            # of snapshots recursively but this might destroy more data than
-            # absolutely necessary, plus the previous purging of zero sized
-            # snapshots can easily break the recursion chain between
-            # filesystems.
-            # On the positive side there should be fewer snapshots and they
-            # will be mostly non-zero, so deleting them should be more
-            # effective since they should nearly always be non zero sized.
-            util.debug("Destroying %s" % snapname, self.verbose)
-            try:
-                snapshot.destroy()
-            except RuntimeError,message:
-                # Would be nice to be able to mark service as degraded here
-                # but it's better to try to continue on rather than to give
-                # up alltogether (SMF maintenance state)
-                sys.stderr.write("Warning: Cleanup failed to destroy: %s\n" % \
-                                 (snapshot.name))
-                sys.stderr.write("Details:\n%s\n" % (str(message)))
-            else:
-                self._destroyedsnaps.append(snapname)
-            # Give zfs some time to recalculate.
-            time.sleep(3)
-        
-    def _send_to_syslog(self):
-        for zpool in self._zpools:
-            status = self._poolstatus[zpool.name]
-            if status == 4:
-                syslog.syslog(syslog.LOG_EMERG,
-                              "%s is over %d%% capacity. " \
-                              "All automatic snapshots were destroyed" \
-                               % (zpool.name, self._emergencyLevel))
-            elif status == 3:
-                syslog.syslog(syslog.LOG_ALERT,
-                              "%s exceeded %d%% capacity. " \
-                              "Automatic snapshots over 1 hour old were destroyed" \
-                               % (zpool.name, self._emergencyLevel))
-            elif status == 2:
-                syslog.syslog(syslog.LOG_CRIT,
-                              "%s exceeded %d%% capacity. " \
-                              "Weekly, hourly and daily automatic snapshots were destroyed" \
-                               % (zpool.name, self._criticalLevel))                             
-            elif status == 1:
-                syslog.syslog(syslog.LOG_WARNING,
-                              "%s exceeded %d%% capacity. " \
-                              "Hourly and daily automatic snapshots were destroyed" \
-                               % (zpool.name, self._warningLevel))
-
-        if len(self._destroyedsnaps) > 0:
-            syslog.syslog(syslog.LOG_NOTICE,
-                          "%d automatic snapshots were destroyed" \
-                           % len(self._destroyedsnaps))
-
-    def _send_notification(self):
-        worstpool = None
-        worststatus = 0
-
-        for zpool in self._zpools:
-            status = self._poolstatus[zpool.name]
-            # >= to ensure that something should always be set.
-            if status >= worststatus:
-                worstpool = zpool.name
-                worststatus = status
-
-        # FIXME make the various levels indexable
-        if worststatus == 4:
-            self._dbus.capacity_exceeded(worstpool, 4, self._emergencyLevel)
-        elif worststatus == 3:
-            self._dbus.capacity_exceeded(worstpool, 3, self._emergencyLevel)
-        elif worststatus == 2:
-            self._dbus.capacity_exceeded(worstpool, 2, self._criticalLevel)
-        elif worststatus == 1:
-            self._dbus.capacity_exceeded(worstpool, 1, self._warningLevel)
-        #elif: 0 everything is fine. Do nothing.
-
-
-def monitor_threads(snapthread):
-    if snapthread.is_alive():
-        return True
-    else:
-        sys.stderr.write("Snapshot monitor thread exited.\n")
-        if snapthread.exitCode == smf.SMF_EXIT_MON_DEGRADE:
-            # FIXME - it would be nicer to mark the service as degraded than
-            # go into maintenance state for some situations such as a
-            # particular snapshot schedule failing.
-            # But for now SMF does not implement this feature. If/when it
-            # does, it's better to use svcadm to put the service into the
-            # correct state since the daemon shouldn't exit when transitioning
-            # to a degraded state.
-            #sys.stderr.write("Placing service into maintenance state\n")
-            #subprocess.call(["/usr/sbin/svcadm", "mark", "maintenance",
-            #                 os.getenv("SMF_FMRI")])
-            # SMF will take care of killing the daemon
-            sys.exit(smf.SMF_EXIT_ERR_FATAL)
-            return False
-        elif snapthread.exitCode == smf.SMF_EXIT_ERR_FATAL:
-            #sys.stderr.write("Placing service into maintenance state\n")
-            #subprocess.call(["/usr/sbin/svcadm", "mark", "maintenance",
-            #                 os.getenv("SMF_FMRI")])
-            # SMF will take care of killing the daemon
-            sys.exit(smf.SMF_EXIT_ERR_FATAL)
-            return False
-        else:
-            sys.stderr.write("Snapshot monitor thread exited abnormally\n")
-            sys.stderr.write("Exit code: %d\n" % (snapthread.exitCode))
-            #subprocess.call(["/usr/sbin/svcadm", "mark", "maintenance",
-            #                 os.getenv("SMF_FMRI")])
-            sys.exit(smf.SMF_EXIT_ERR_FATAL)
-            return False
-
-
-def child_sig_handler(signum, frame):
-    if signum == signal.SIGUSR1:
-        sys.exit(smf.SMF_EXIT_OK)
-    elif signum == signal.SIGCHLD:
-        sys.exit(smf.SMF_EXIT_ERR_FATAL)
-    elif signum == signal.SIGALRM:
-        sys.exit(smf.SMF_EXIT_ERR_FATAL)
-
-# Default daemon parameters.
-# File mode creation mask of the daemon.
-UMASK = 0
-# Default working directory for the daemon.
-WORKDIR = "/"
-# Default maximum for the number of available file descriptors.
-MAXFD = 1024
-
-def create_daemon():
-    """
-    Detach a process from the controlling terminal and run it in the
-    background as a daemon.
-    """
-    #Catch signals that we might receive from child
-    signal.signal(signal.SIGCHLD, child_sig_handler)
-    signal.signal(signal.SIGUSR1, child_sig_handler)
-    signal.signal(signal.SIGALRM, child_sig_handler)
-    try:
-        pid = os.fork()
-    except OSError, e:
-        raise Exception, "%s [%d]" % (e.strerror, e.errno)
-
-    if (pid == 0):
-        #Reset signals that we set to trap in parent
-        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
-        signal.signal(signal.SIGUSR1, signal.SIG_DFL)
-        signal.signal(signal.SIGALRM, signal.SIG_DFL)
-        os.setsid()
-        os.chdir(WORKDIR)
-        os.umask(UMASK)
-    else:
-        #Wait for the child to give the OK or otherwise.
-        signal.pause()
-
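-# Illustration (hypothetical, not part of the original file): the parent
-# blocks in signal.pause() above until the daemonised child reports status;
-# the child could, for example, acknowledge a successful start with
-#
-#     os.kill(os.getppid(), signal.SIGUSR1)   # parent then exits SMF_EXIT_OK
-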
-
-def main(argv):
-
-    # Check SMF invocation environment
-    if os.getenv("SMF_FMRI") == None or os.getenv("SMF_METHOD") != "start":
-        sys.stderr.write("Command line invocation of %s unsupported.\n" \
-                         % (sys.argv[0]))
-        sys.stderr.write("This command is intended for smf(5) invocation only.\n")
-        sys.exit(smf.SMF_EXIT_ERR_NOSMF)
-
-    # Daemonise the service.
-    create_daemon()
-
-    # Check the user's security attributes (RBAC profile).
-    # Note that UID == 0 will match any profile search so
-    # there is no need to check it explicitly.
-    syslog.openlog("time-sliderd", 0, syslog.LOG_DAEMON)
-    rbacp = RBACprofile()
-    if rbacp.has_profile("ZFS File System Management"):
-
-        gobject.threads_init()
-
-        # Tell dbus to use the gobject mainloop for async ops
-        dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
-        dbus.mainloop.glib.threads_init()
-        # Register a bus name with the system dbus daemon
-        systemBus = dbus.SystemBus()
-        name = dbus.service.BusName("org.opensolaris.TimeSlider", systemBus)
-
-        # Create and start the snapshot manager. Takes care of the
-        # auto-snapshotting service and auto cleanup.
-        snapshot = SnapshotManager(systemBus)
-        snapshot.start()
-        gobject.timeout_add(2000, monitor_threads, snapshot)
-
-        mainloop = gobject.MainLoop()
-        try:
-            mainloop.run()
-        except KeyboardInterrupt:
-            mainloop.quit()
-            sys.exit(smf.SMF_EXIT_OK)
-    else:
-        syslog.syslog(syslog.LOG_ERR,
-               "%s has insufficient privileges to run time-sliderd!" \
-               % rbacp.name)
-        syslog.closelog()
-        sys.exit(smf.SMF_EXIT_ERR_PERM)
-    syslog.closelog()
-    sys.exit(smf.SMF_EXIT_OK)
-
diff --git a/usr/share/time-slider/lib/time_slider/tmp.py~ b/usr/share/time-slider/lib/time_slider/tmp.py~
deleted file mode 100755 (executable)
index 4072f3b..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/python2.6
-import os
-
-backupDirs = []
-
-for root, dirs, files in os.walk("/ts-test/TIMESLIDER/nanmbp"):
-    if '.time-slider' in dirs:
-#        dirs.remove('.time-slider')
-        backupDirs.append(os.path.join(root, ".time-slider/rsync"))
-        print "root %s" % root
-        s1 = root.split("/ts-test/TIMESLIDER/nanmbp/", 1)
-        print s1
-
-for dirName in backupDirs:
-    print "dirName %s" % dirName
-    s1 = dirName.split("/ts-test/TIMESLIDER/nanmbp/", 1)
-    s2 = s1[1].split("/.time-slider/rsync", 1)
-    print s2[0]
-    os.chdir(dirName)
-    dirList = ["toto %s" % d for d in os.listdir(dirName)
-               if os.path.isdir(d) and
-               not os.path.islink(d)]
-    print dirList
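-
-# Illustration (hypothetical path, not part of the original file): for
-# dirName "/ts-test/TIMESLIDER/nanmbp/export/home/.time-slider/rsync",
-# s1[1] is "export/home/.time-slider/rsync" and s2[0] is "export/home".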
diff --git a/usr/share/time-slider/lib/time_slider/tmp2.py~ b/usr/share/time-slider/lib/time_slider/tmp2.py~
deleted file mode 100755 (executable)
index 84a3a59..0000000
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/python2.6
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-import threading
-import sys
-import os
-import time
-import getopt
-import locale
-import shutil
-import fcntl
-from bisect import insort
-
-try:
-    import pygtk
-    pygtk.require("2.4")
-except:
-    pass
-try:
-    import gtk
-    import gtk.glade
-    gtk.gdk.threads_init()
-except:
-    sys.exit(1)
-try:
-    import glib
-    import gobject
-except:
-    sys.exit(1)
-
-from os.path import abspath, dirname, join, pardir
-sys.path.insert(0, join(dirname(__file__), pardir, "plugin"))
-import plugin
-sys.path.insert(0, join(dirname(__file__), pardir, "plugin", "rsync"))
-import rsyncsmf
-
-
-# Here we define the path constants so that other modules can use them.
-# This allows us to get access to the shared files without having to
-# know the actual location; we just use the location of the current
-# file and paths relative to that.
-SHARED_FILES = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                               os.path.pardir,
-                               os.path.pardir))
-LOCALE_PATH = os.path.join('/usr', 'share', 'locale')
-RESOURCE_PATH = os.path.join(SHARED_FILES, 'res')
-
-# The name of the gettext domain. Because our translation files are
-# not in a global folder this doesn't really matter; still, setting it
-# to the application name is a good idea.
-GETTEXT_DOMAIN = 'time-slider'
-
-# set up the glade gettext system and locales
-gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
-gtk.glade.textdomain(GETTEXT_DOMAIN)
-
-import zfs
-from rbac import RBACprofile
-
-class RsyncBackup:
-
-    def __init__(self, mountpoint, rsync_dir = None, fsname = None,
-                 snaplabel = None, creationtime = None):
-
-        if rsync_dir == None:
-            self.__init_from_mp(mountpoint)
-        else:
-            self.rsync_dir = rsync_dir
-            self.mountpoint = mountpoint
-            self.fsname = fsname
-            self.snaplabel = snaplabel
-
-            self.creationtime = creationtime
-            try:
-                tm = time.localtime(self.creationtime)
-                self.creationtime_str = unicode(time.strftime("%c", tm),
-                           locale.getpreferredencoding()).encode('utf-8')
-            except:
-                self.creationtime_str = time.ctime(self.creationtime)
-
-    def __init_from_mp(self, mountpoint):
-        self.rsyncsmf = rsyncsmf.RsyncSMF("%s:rsync" % (plugin.PLUGINBASEFMRI))
-        rsyncBaseDir = self.rsyncsmf.get_target_dir()
-        # Use "sysname" rather than "sys" to avoid shadowing the imported
-        # sys module.
-        sysname, nodeName, rel, ver, arch = os.uname()
-        self.rsync_dir = os.path.join(rsyncBaseDir,
-                                      rsyncsmf.RSYNCDIRPREFIX,
-                                      nodeName)
-        self.mountpoint = mountpoint
-
-        s1 = mountpoint.split("%s/" % self.rsync_dir, 1)
-        s2 = s1[1].split("/%s" % rsyncsmf.RSYNCDIRSUFFIX, 1)
-        s3 = s2[1].split('/', 2)
-        self.fsname = s2[0]
-        self.snaplabel = s3[1]
-        self.creationtime = os.stat(mountpoint).st_mtime
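-
-        # Illustration (hypothetical layout, not part of the original file):
-        # assuming RSYNCDIRSUFFIX is ".time-slider/rsync", a mountpoint like
-        #     <rsync_dir>/tank/home/.time-slider/rsync/<label>/...
-        # parses above to fsname "tank/home" and snaplabel "<label>".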
-
-    def __str__(self):
-        ret = "self.rsync_dir = %s\n \
-               self.mountpoint = %s\n \
-               self.fsname = %s\n \
-               self.snaplabel = %s\n" % (self.rsync_dir,
-                                         self.mountpoint, self.fsname,
-                                         self.snaplabel)
-        return ret
-
-
-    def exists(self):
-        return os.path.exists(self.mountpoint)
-
-    def destroy(self):
-        lockFileDir = os.path.join(self.rsync_dir,
-                                   self.fsname,
-                                   rsyncsmf.RSYNCLOCKSUFFIX)
-
-        if not os.path.exists(lockFileDir):
-            os.makedirs(lockFileDir, 0755)
-
-        lockFile = os.path.join(lockFileDir, self.snaplabel + ".lock")
-        try:
-            lockFp = open(lockFile, 'w')
-            fcntl.flock(lockFp, fcntl.LOCK_EX | fcntl.LOCK_NB)
-        except IOError:
-            raise RuntimeError, \
-                  "couldn't delete %s, already used by another process" \
-                  % self.mountpoint
-
-        trashDir = os.path.join(self.rsync_dir,
-                                self.fsname,
-                                rsyncsmf.RSYNCTRASHSUFFIX)
-        if not os.path.exists(trashDir):
-            os.makedirs(trashDir, 0755)
-
-        backupTrashDir = os.path.join(self.rsync_dir,
-                                      self.fsname,
-                                      rsyncsmf.RSYNCTRASHSUFFIX,
-                                      self.snaplabel)
-
-        # Move, then delete.
-        os.rename(self.mountpoint, backupTrashDir)
-        shutil.rmtree(backupTrashDir)
-
-        log = "%s/%s/%s/%s/%s.log" % (self.rsync_dir,
-                                      self.fsname,
-                                      rsyncsmf.RSYNCDIRSUFFIX,
-                                      ".partial",
-                                      self.snaplabel)
-        if os.path.exists(log):
-            os.unlink(log)
-
-        lockFp.close()
-        os.unlink(lockFile)
-
-
-backupDirs = []
-for root, dirs, files in os.walk(rsyncsmf.RsyncSMF("%s:rsync" % (plugin.PLUGINBASEFMRI)).get_target_dir()):
-    if '.time-slider' in dirs:
-        dirs.remove('.time-slider')
-        backupDir = os.path.join(root, rsyncsmf.RSYNCDIRSUFFIX)
-        if os.path.exists(backupDir):
-            insort(backupDirs, os.path.abspath(backupDir))
-
-
-print backupDirs
-
-
diff --git a/usr/share/time-slider/lib/time_slider/util.py~ b/usr/share/time-slider/lib/time_slider/util.py~
deleted file mode 100644 (file)
index e48326c..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/python2.6
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-import os
-import subprocess
-import sys
-import syslog
-import statvfs
-import math
-import gio
-
-def run_command(command, raise_on_try=True):
-    """
-    Wrapper function around subprocess.Popen.
-    Returns a tuple of standard output and standard error.
-    Raises a RuntimeError if the command failed to execute or
-    if the command returns a non-zero exit status.
-    """
-    try:
-        p = subprocess.Popen(command,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE,
-                             close_fds=True)
-        outdata,errdata = p.communicate()
-        err = p.wait()
-    except OSError, message:
-        raise RuntimeError, "%s subprocess error:\n %s" % \
-                            (command, str(message))
-    if err != 0 and raise_on_try:
-        raise RuntimeError, '%s failed with exit code %d\n%s' % \
-                            (str(command), err, errdata)
-    return outdata,errdata
-
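-# Hypothetical usage sketch (not part of the original module):
-#
-#     out, err = run_command(["/usr/sbin/zfs", "list", "-H", "-o", "name"])
-#     for name in out.rstrip().split('\n'):
-#         print name
-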
-def debug(message, verbose):
-    """
-    Prints message out to standard error and syslog if
-    verbose = True.
-    Note that the caller needs to first establish a syslog
-    context using syslog.openlog()
-    """
-    if verbose:
-        syslog.syslog(syslog.LOG_NOTICE, message + '\n')
-        sys.stderr.write(message + '\n')
-
-def log_error(loglevel, message):
-    """
-    Trivial syslog wrapper that also outputs to stderr
-    Requires caller to have first opened a syslog session
-    using syslog.openlog()
-    """
-    syslog.syslog(loglevel, message + '\n')
-    sys.stderr.write(message + '\n')
-
-def get_filesystem_capacity(path):
-    """Returns filesystem space usage of path as an integer percentage of
-       the entire capacity of path.
-    """
-    if not os.path.exists(path):
-        raise ValueError("%s is a non-existent path" % path)
-    f = os.statvfs(path)
-
-    unavailBlocks = f[statvfs.F_BLOCKS] - f[statvfs.F_BAVAIL]
-    capacity = int(math.ceil(100 * (unavailBlocks / float(f[statvfs.F_BLOCKS]))))
-
-    return capacity
-
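-# Worked example for get_filesystem_capacity() (illustrative numbers only):
-# with F_BLOCKS = 1000 and F_BAVAIL = 250, unavailBlocks = 750 and the
-# function returns int(math.ceil(100 * 750/1000.0)) = 75 (percent).
-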
-def get_available_size(path):
-    """Returns the available space in bytes under path"""
-    if not os.path.exists(path):
-        raise ValueError("%s is a non-existent path" % path)
-    f = os.statvfs(path)
-    free = long(f[statvfs.F_BAVAIL] * f[statvfs.F_FRSIZE])
-    
-    return free
-
-def get_used_size(path):
-    """Returns the used space in bytes of fileystem associated
-       with path"""
-    if not os.path.exists(path):
-        raise ValueError("%s is a non-existent path" % path)
-    f = os.statvfs(path)
-
-    unavailBlocks = f[statvfs.F_BLOCKS] - f[statvfs.F_BAVAIL]
-    used = long(unavailBlocks * f[statvfs.F_FRSIZE])
-
-    return used
-
-def get_total_size(path):
-    """Returns the total storage space in bytes of fileystem
-       associated with path"""
-    if not os.path.exists(path):
-        raise ValueError("%s is a non-existent path" % path)
-    f = os.statvfs(path)
-    total = long(f[statvfs.F_BLOCKS] * f[statvfs.F_FRSIZE])
-
-    return total
-
-def path_to_volume(path):
-    """
-       Tries to map a given path name to a gio Volume and
-       returns the gio.Volume object of the enclosing volume.
-       If it fails to find an enclosing volume it returns
-       None.
-    """
-    gFile = gio.File(path)
-    try:
-        mount = gFile.find_enclosing_mount()
-    except gio.Error:
-        return None
-    else:
-        if mount != None:
-            volume = mount.get_volume()
-            return volume
-    return None
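-
-# Hypothetical usage sketch (not part of the original module):
-#
-#     volume = path_to_volume("/media/usbdisk")
-#     if volume != None:
-#         print volume.get_name()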
diff --git a/usr/share/time-slider/lib/time_slider/zfs.py~ b/usr/share/time-slider/lib/time_slider/zfs.py~
deleted file mode 100755 (executable)
index b35e58d..0000000
+++ /dev/null
@@ -1,1049 +0,0 @@
-#!/usr/bin/python2.6
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-import subprocess
-import re
-import threading
-from bisect import insort, bisect_left, bisect_right
-
-import util
-
-BYTESPERMB = 1048576
-
-# Commonly used command paths
-PFCMD = "/usr/bin/pfexec"
-ZFSCMD = "/usr/sbin/zfs"
-ZPOOLCMD = "/usr/sbin/zpool"
-
-
-class Datasets:
-    """
-    Container class for all zfs datasets. Maintains a centralised
-    list of datasets (generated on demand) and accessor methods. 
-    Also allows clients to notify when a refresh might be necessary.
-    """
-    # Class wide instead of per-instance in order to avoid duplication
-    filesystems = None
-    volumes = None
-    snapshots = None
-    
-    # Mutex locks to prevent concurrent writes to above class wide
-    # dataset lists.
-    _filesystemslock = threading.Lock()
-    _volumeslock = threading.Lock()
-    snapshotslock = threading.Lock()
-
-    def create_auto_snapshot_set(self, label, tag = None):
-        """
-        Create a complete set of snapshots as if this were
-        for a standard zfs-auto-snapshot operation.
-        
-        Keyword arguments:
-        label:
-            A label to apply to the snapshot name. Cannot be None.
-        tag:
-            A string indicating one of the standard auto-snapshot schedules
-            tags to check (eg. "frequent" for will map to the tag:
-            com.sun:auto-snapshot:frequent). If specified as a zfs property
-            on a zfs dataset, the property corresponding to the tag will 
-            override the wildcard property: "com.sun:auto-snapshot"
-            Default value = None
-        """
-        everything = []
-        included = []
-        excluded = []
-        single = []
-        recursive = []
-        finalrecursive = []
-
-        # Get auto-snap property in two passes. First with the schedule
-        # specific tag override value, then with the general property value
-        cmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
-               "-o", "name,com.sun:auto-snapshot", "-s", "name"]
-        if tag:
-            overrideprop = "com.sun:auto-snapshot:" + tag
-            scmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
-                    "-o", "name," + overrideprop, "-s", "name"]
-            outdata,errdata = util.run_command(scmd)
-            for line in outdata.rstrip().split('\n'):
-                line = line.split()
-                # Skip over unset values. 
-                if line[1] == "-":
-                    continue
-                # Add to everything list. This is used later
-                # for identifying parents/children of a given
-                # filesystem or volume.
-                everything.append(line[0])
-                if line[1] == "true":
-                    included.append(line[0])
-                elif line[1] == "false":
-                    excluded.append(line[0])
-        # Now use the general property. If no value
-        # was set in the first pass, we set it here.
-        outdata,errdata = util.run_command(cmd)
-        for line in outdata.rstrip().split('\n'):
-            line = line.split()
-            idx = bisect_right(everything, line[0])
-            if len(everything) == 0 or \
-               everything[idx-1] != line[0]:           
-                # Dataset is neither included nor excluded so far
-                if line[1] == "-":
-                    continue
-                everything.insert(idx, line[0])
-                if line[1] == "true":
-                    included.insert(0, line[0])
-                elif line[1] == "false":
-                    excluded.append(line[0])
-
-        # Now figure out what can be recursively snapshotted and what
-        # must be singly snapshotted. Single snapshot restrictions apply
-        # to those datasets who have a child in the excluded list.
-        # 'included' is sorted in reverse alphabetical order. 
-        for datasetname in included:
-            excludedchild = False
-            idx = bisect_right(everything, datasetname)
-            children = [name for name in everything[idx:] if \
-                        name.find(datasetname) == 0]
-            for child in children:
-                idx = bisect_left(excluded, child)
-                if idx < len(excluded) and excluded[idx] == child:
-                    excludedchild = True
-                    single.append(datasetname)
-                    break
-            if excludedchild == False:
-                # We want recursive list sorted in alphabetical order
-                # so insert instead of append to the list.
-                recursive.insert(0, datasetname)
-
-        for datasetname in recursive:
-            parts = datasetname.rsplit('/', 1)
-            parent = parts[0]
-            if parent == datasetname:
-                # Root filesystem of the Zpool, so
-                # this can't be inherited and must be
-                # set locally.
-                finalrecursive.append(datasetname)
-                continue
-            idx = bisect_right(recursive, parent)
-            if len(recursive) > 0 and \
-               recursive[idx-1] == parent:
-                # Parent already marked for recursive snapshot: so skip
-                continue
-            else:
-                finalrecursive.append(datasetname)
-
-        for name in finalrecursive:
-            dataset = ReadWritableDataset(name)
-            dataset.create_snapshot(label, True)
-        for name in single:
-            dataset = ReadWritableDataset(name)
-            dataset.create_snapshot(label, False)
-
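-    # Hypothetical usage sketch (not part of the original module):
-    #
-    #     datasets = Datasets()
-    #     datasets.create_auto_snapshot_set("daily-2014-02-12-12h57",
-    #                                       tag="daily")
-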
-    def list_auto_snapshot_sets(self, tag = None):
-        """
-        Returns a list of zfs filesystems and volumes tagged with
-        the "com.sun:auto-snapshot" property set to "true", either
-        set locally or inherited. Snapshots are excluded from the
-        returned result.
-
-        Keyword Arguments:
-        tag:
-            A string indicating one of the standard auto-snapshot schedules
-            tags to check (eg. "frequent" will map to the tag:
-            com.sun:auto-snapshot:frequent). If specified as a zfs property
-            on a zfs dataset, the property corresponding to the tag will 
-            override the wildcard property: "com.sun:auto-snapshot"
-            Default value = None
-        """
-        #Get auto-snap property in two passes. First with the global
-        #value, then overriding with the label/schedule specific value
-
-        included = []
-        excluded = []
-
-        cmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
-               "-o", "name,com.sun:auto-snapshot", "-s", "name"]
-        if tag:
-            overrideprop = "com.sun:auto-snapshot:" + tag
-            scmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
-                    "-o", "name," + overrideprop, "-s", "name"]
-            outdata,errdata = util.run_command(scmd)
-            for line in outdata.rstrip().split('\n'):
-                line = line.split()
-                if line[1] == "true":
-                    included.append(line[0])
-                elif line[1] == "false":
-                    excluded.append(line[0])
-        outdata,errdata = util.run_command(cmd)
-        for line in outdata.rstrip().split('\n'):
-            line = line.split()
-            # Only set values that aren't already set. Don't override
-            try:
-                included.index(line[0])
-                continue
-            except ValueError:
-                try:
-                    excluded.index(line[0])
-                    continue
-                except ValueError:
-                    # Dataset is not listed in either list.
-                    if line[1] == "true":
-                        included.append(line[0])
-        return included
-
-    def list_filesystems(self, pattern = None):
-        """
-        List pattern matching filesystems sorted by name.
-        
-        Keyword arguments:
-        pattern -- Filter according to pattern (default None)
-        """
-        filesystems = []
-        # Need to first ensure no other thread is trying to
-        # build this list at the same time.
-        Datasets._filesystemslock.acquire()
-        if Datasets.filesystems == None:
-            Datasets.filesystems = []
-            cmd = [ZFSCMD, "list", "-H", "-t", "filesystem", \
-                   "-o", "name,mountpoint", "-s", "name"]
-            try:
-                p = subprocess.Popen(cmd,
-                                     stdout=subprocess.PIPE,
-                                     stderr=subprocess.PIPE,
-                                     close_fds=True)
-                outdata,errdata = p.communicate()
-                err = p.wait()
-            except OSError, message:
-                raise RuntimeError, "%s subprocess error:\n %s" % \
-                                    (cmd, str(message))
-            if err != 0:
-                Datasets._filesystemslock.release()
-                raise RuntimeError, '%s failed with exit code %d\n%s' % \
-                                    (str(cmd), err, errdata)
-            for line in outdata.rstrip().split('\n'):
-                line = line.rstrip().split()
-                Datasets.filesystems.append([line[0], line[1]])
-        Datasets._filesystemslock.release()
-
-        if pattern == None:
-            filesystems = Datasets.filesystems[:]
-        else:
-            # Regular expression pattern to match "pattern" parameter.
-            regexpattern = ".*%s.*" % pattern
-            patternobj = re.compile(regexpattern)
-
-            for fsname,fsmountpoint in Datasets.filesystems:
-                patternmatchobj = re.match(patternobj, fsname)
-                if patternmatchobj != None:
-                    filesystems.append([fsname, fsmountpoint])
-        return filesystems
-
-    def list_volumes(self, pattern = None):
-        """
-        List pattern matching volumes sorted by name.
-        
-        Keyword arguments:
-        pattern -- Filter according to pattern (default None)
-        """
-        volumes = []
-        Datasets._volumeslock.acquire()
-        if Datasets.volumes == None:
-            Datasets.volumes = []
-            cmd = [ZFSCMD, "list", "-H", "-t", "volume", \
-                   "-o", "name", "-s", "name"]
-            try:
-                p = subprocess.Popen(cmd,
-                                     stdout=subprocess.PIPE,
-                                     stderr=subprocess.PIPE,
-                                     close_fds=True)
-                outdata,errdata = p.communicate()
-                err = p.wait()
-            except OSError, message:
-                raise RuntimeError, "%s subprocess error:\n %s" % \
-                                    (cmd, str(message))
-            if err != 0:
-                Datasets._volumeslock.release()
-                raise RuntimeError, '%s failed with exit code %d\n%s' % \
-                                    (str(cmd), err, errdata)
-            for line in outdata.rstrip().split('\n'):
-                Datasets.volumes.append(line.rstrip())
-        Datasets._volumeslock.release()
-
-        if pattern == None:
-            volumes = Datasets.volumes[:]
-        else:
-            # Regular expression pattern to match "pattern" parameter.
-            regexpattern = ".*%s.*" % pattern
-            patternobj = re.compile(regexpattern)
-
-            for volname in Datasets.volumes:
-                patternmatchobj = re.match(patternobj, volname)
-                if patternmatchobj != None:
-                    volumes.append(volname)
-        return volumes
-
-    def list_snapshots(self, pattern = None):
-        """
-        List pattern matching snapshots sorted by creation date.
-        Oldest listed first
-        
-        Keyword arguments:
-        pattern -- Filter according to pattern (default None)
-        """
-        snapshots = []
-        Datasets.snapshotslock.acquire()
-        if Datasets.snapshots == None:
-            Datasets.snapshots = []
-            snaps = []
-            cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value,name", "creation"]
-            try:
-                p = subprocess.Popen(cmd,
-                                     stdout=subprocess.PIPE,
-                                     stderr=subprocess.PIPE,
-                                     close_fds=True)
-                outdata,errdata = p.communicate()
-                err = p.wait()
-            except OSError, message:
-                Datasets.snapshotslock.release()
-                raise RuntimeError, "%s subprocess error:\n %s" % \
-                                    (cmd, str(message))
-            if err != 0:
-                Datasets.snapshotslock.release()
-                raise RuntimeError, '%s failed with exit code %d\n%s' % \
-                                    (str(cmd), err, errdata)
-            for dataset in outdata.rstrip().split('\n'):
-                if re.search("@", dataset):
-                    insort(snaps, dataset.split())
-            for snap in snaps:
-                Datasets.snapshots.append([snap[1], long(snap[0])])
-        if pattern == None:
-            snapshots = Datasets.snapshots[:]
-        else:
-            # Regular expression pattern to match "pattern" parameter.
-            regexpattern = ".*@.*%s" % pattern
-            patternobj = re.compile(regexpattern)
-
-            for snapname,snaptime in Datasets.snapshots:
-                patternmatchobj = re.match(patternobj, snapname)
-                if patternmatchobj != None:
-                    snapshots.append([snapname, snaptime])
-        Datasets.snapshotslock.release()
-        return snapshots
-
-    def list_cloned_snapshots(self):
-        """
-        Returns a list of snapshots that have cloned filesystems
-        dependent on them.
-        Snapshots with cloned filesystems can not be destroyed
-        unless dependent cloned filesystems are first destroyed.
-        """
-        cmd = [ZFSCMD, "list", "-H", "-o", "origin"]
-        outdata,errdata = util.run_command(cmd)
-        result = []
-        for line in outdata.rstrip().split('\n'):
-            details = line.rstrip()
-            if details != "-":
-                try:
-                    result.index(details)
-                except ValueError:
-                    result.append(details)
-        return result
-
-    def list_held_snapshots(self):
-        """
-        Returns a list of snapshots that have a "userrefs"
-        property value of greater than 0. Result list is
-        sorted in order of creation time. Oldest listed first.
-        """
-        cmd = [ZFSCMD, "list", "-H",
-               "-t", "snapshot",
-               "-s", "creation",
-               "-o", "userrefs,name"]
-        outdata,errdata = util.run_command(cmd)
-        result = []
-        for line in outdata.rstrip().split('\n'):
-            details = line.split()
-            if details[0] != "0":
-                result.append(details[1])
-        return result
-
-    def refresh_snapshots(self):
-        """
-        Should be called when snapshots have been created or deleted
-        and a rescan should be performed. Rescan gets deferred until
-        next invocation of zfs.Dataset.list_snapshots()
-        """
-        # FIXME in future.
-        # This is a little sub-optimal because we should be able to modify
-        # the snapshot list in place in some situations and regenerate the 
-        # snapshot list without calling out to zfs(1m). But on the
-        # pro side, we will pick up any new snapshots since the last
-        # scan that we would be otherwise unaware of.
-        Datasets.snapshotslock.acquire()
-        Datasets.snapshots = None
-        Datasets.snapshotslock.release()
-
-
-class ZPool:
-    """
-    Base class for ZFS storage pool objects
-    """
-    def __init__(self, name):
-        self.name = name
-        self.health = self.__get_health()
-        self.__datasets = Datasets()
-        self.__filesystems = None
-        self.__volumes = None
-        self.__snapshots = None
-
-    def __get_health(self):
-        """
-        Returns pool health status: 'ONLINE', 'DEGRADED' or 'FAULTED'
-        """
-        cmd = [ZPOOLCMD, "list", "-H", "-o", "health", self.name]
-        outdata,errdata = util.run_command(cmd)
-        result = outdata.rstrip()
-        return result
-
-    def get_capacity(self):
-        """
-        Returns the percentage of total pool storage in use.
-        Calculated based on the "used" and "available" properties
-        of the pool's top-level filesystem because the values account
-        for reservations and quotas of children in their calculations,
-        giving a more practical indication of how much capacity is used
-        up on the pool.
-        """
-        if self.health == "FAULTED":
-            raise ZPoolFaultedError("Can not determine capacity of zpool: %s" \
-                                    "because it is in a FAULTED state" \
-                                    % (self.name))
-
-        cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", \
-               "used,available", self.name]
-        outdata,errdata = util.run_command(cmd)
-        _used,_available = outdata.rstrip().split('\n')
-        used = float(_used)
-        available = float(_available) 
-        return 100.0 * used/(used + available)
-
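-    # Worked example for get_capacity() (illustrative numbers only): with
-    # used = 400.0 and available = 600.0 the method returns
-    # 100.0 * 400.0 / (400.0 + 600.0) = 40.0 (percent).
-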
-    def get_available_size(self):
-        """
-        How much unused space is available for use on this Zpool.
-        Answer in bytes.
-        """
-        # zpool(1) doesn't report available space in
-        # units suitable for calculations but zfs(1)
-        # can so use it to find the value for the
-        # filesystem matching the pool.
-        # The root filesystem of the pool is simply
-        # the pool name.
-        poolfs = Filesystem(self.name)
-        avail = poolfs.get_available_size()
-        return avail
-
-    def get_used_size(self):
-        """
-        How much space is in use on this Zpool.
-        Answer in bytes
-        """
-        # Same as ZPool.get_available_size(): zpool(1)
-        # doesn't generate suitable output so use
-        # zfs(1) on the toplevel filesystem
-        if self.health == "FAULTED":
-            raise ZPoolFaultedError("Can not determine used size of zpool: %s " \
-                                    "because it is in a FAULTED state" \
-                                    % (self.name))
-        poolfs = Filesystem(self.name)
-        used = poolfs.get_used_size()
-        return used
-
-    def list_filesystems(self):
-        """
-        Return a list of filesystems on this Zpool.
-        List is sorted by name.
-        """
-        if self.__filesystems == None:
-            result = []
-            # Provides pre-sorted filesystem list
-            for fsname,fsmountpoint in self.__datasets.list_filesystems():
-                if re.match(self.name, fsname):
-                    result.append([fsname, fsmountpoint])
-            self.__filesystems = result
-        return self.__filesystems
-
-    def list_volumes(self):
-        """
-        Return a list of volumes (zvol) on this Zpool
-        List is sorted by name
-        """
-        if self.__volumes == None:
-            result = []
-            regexpattern = "^%s" % self.name
-            patternobj = re.compile(regexpattern)
-            for volname in self.__datasets.list_volumes():
-                patternmatchobj = re.match(patternobj, volname)
-                if patternmatchobj != None:
-                    result.append(volname)
-            result.sort()
-            self.__volumes = result
-        return self.__volumes
-
-    def list_auto_snapshot_sets(self, tag = None):
-        """
-        Returns a list of zfs filesystems and volumes tagged with
-        the "com.sun:auto-snapshot" property set to "true", either
-        set locally or inherited. Snapshots are excluded from the
-        returned result. Results are not sorted.
-
-        Keyword Arguments:
-        tag:
-            A string indicating one of the standard auto-snapshot schedules
-            tags to check (eg. "frequent" will map to the tag:
-            com.sun:auto-snapshot:frequent). If specified as a zfs property
-            on a zfs dataset, the property corresponding to the tag will 
-            override the wildcard property: "com.sun:auto-snapshot"
-            Default value = None
-        """
-        result = []
-        allsets = self.__datasets.list_auto_snapshot_sets(tag)
-        if len(allsets) == 0:
-            return result
-
-        regexpattern = "^%s" % self.name
-        patternobj = re.compile(regexpattern)
-        for datasetname in allsets:
-            patternmatchobj = re.match(patternobj, datasetname)
-            if patternmatchobj != None:
-                result.append(datasetname)
-        return result
-
-    def list_snapshots(self, pattern = None):
-        """
-        List pattern matching snapshots sorted by creation date.
-        Oldest listed first
-           
-        Keyword arguments:
-        pattern -- Filter according to pattern (default None)   
-        """
-        # If there isn't a list of snapshots for this dataset
-        # already, create it now and store it in order to save
-        # time later for potential future invocations.
-        Datasets.snapshotslock.acquire()
-        if Datasets.snapshots == None:
-            self.__snapshots = None
-        Datasets.snapshotslock.release()
-        if self.__snapshots == None:
-            result = []
-            regexpattern = "^%s.*@"  % self.name
-            patternobj = re.compile(regexpattern)
-            for snapname,snaptime in self.__datasets.list_snapshots():
-                patternmatchobj = re.match(patternobj, snapname)
-                if patternmatchobj != None:
-                    result.append([snapname, snaptime])
-            # Results already sorted by creation time
-            self.__snapshots = result
-        if pattern == None:
-            return self.__snapshots
-        else:
-            snapshots = []
-            regexpattern = "^%s.*@.*%s" % (self.name, pattern)
-            patternobj = re.compile(regexpattern)
-            for snapname,snaptime in self.__snapshots:
-                patternmatchobj = re.match(patternobj, snapname)
-                if patternmatchobj != None:
-                    snapshots.append([snapname, snaptime])
-            return snapshots
-
-    def __str__(self):
-        return_string = "ZPool name: " + self.name
-        return_string = return_string + "\n\tHealth: " + self.health
-        try:
-            return_string = return_string + \
-                            "\n\tUsed: " + \
-                            str(self.get_used_size()/BYTESPERMB) + "Mb"
-            return_string = return_string + \
-                            "\n\tAvailable: " + \
-                            str(self.get_available_size()/BYTESPERMB) + "Mb"
-            return_string = return_string + \
-                            "\n\tCapacity: " + \
-                            str(self.get_capacity()) + "%"
-        except ZPoolFaultedError:
-            pass
-        return return_string
-
-
-class ReadableDataset:
-    """
-    Base class for Filesystem, Volume and Snapshot classes
-    Provides methods for read only operations common to all.
-    """
-    def __init__(self, name, creation = None):
-        self.name = name
-        self.__creationTime = creation
-        self.datasets = Datasets()
-
-    def __str__(self):
-        return_string = "ReadableDataset name: " + self.name + "\n"
-        return return_string
-
-    def get_creation_time(self):
-        if self.__creationTime == None:
-            cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "creation",
-                   self.name]
-            outdata,errdata = util.run_command(cmd)
-            self.__creationTime = long(outdata.rstrip())
-        return self.__creationTime
-
-    def exists(self):
-        """
-        Returns True if the dataset is still existent on the system.
-        False otherwise
-        """
-        # Test existence of the dataset by checking the output of a
-        # simple zfs get command on the snapshot
-        cmd = [ZFSCMD, "get", "-H", "-o", "name", "type", self.name]
-        try:
-            p = subprocess.Popen(cmd,
-                                 stdout=subprocess.PIPE,
-                                 stderr=subprocess.PIPE,
-                                 close_fds=True)
-            outdata,errdata = p.communicate()
-            err = p.wait()
-        except OSError, message:
-            raise RuntimeError, "%s subprocess error:\n %s" % \
-                            (cmd, str(message))
-        if err != 0:
-            # Doesn't exist
-            return False
-
-        result = outdata.rstrip()
-        if result == self.name:
-            return True
-        else:
-            return False
-
-    def get_used_size(self):
-        cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "used", self.name]
-        outdata,errdata = util.run_command(cmd)
-        return long(outdata.rstrip())
-
-    def get_user_property(self, prop, local=False):
-        if local == True:
-            cmd = [ZFSCMD, "get", "-s", "local", "-H", "-o", "value", prop, self.name]
-        else:
-            cmd = [ZFSCMD, "get", "-H", "-o", "value", prop, self.name]
-        outdata,errdata = util.run_command(cmd)
-        return outdata.rstrip()
-
-    def set_user_property(self, prop, value):
-        cmd = [PFCMD, ZFSCMD, "set", "%s=%s" % (prop, value), self.name]
-        outdata,errdata = util.run_command(cmd)
-    
-    def unset_user_property(self, prop):
-        cmd = [PFCMD, ZFSCMD, "inherit", prop, self.name]
-        outdata,errdata = util.run_command(cmd)
-
-class Snapshot(ReadableDataset):
-    """
-    ZFS Snapshot object class.
-    Provides information and operations specific to ZFS snapshots
-    """    
-    def __init__(self, name, creation = None):
-        """
-        Keyword arguments:
-        name -- Name of the ZFS snapshot
-        creation -- Creation time of the snapshot if known (Default None)
-        """
-        ReadableDataset.__init__(self, name, creation)
-        self.fsname, self.snaplabel = self.__split_snapshot_name()
-        self.poolname = self.__get_pool_name()
-
-    def __get_pool_name(self):
-        name = self.fsname.split("/", 1)
-        return name[0]
-
-    def __split_snapshot_name(self):
-        name = self.name.split("@", 1)
-        # Make sure this is really a snapshot and not a
-        # filesystem otherwise a filesystem could get 
-        # destroyed instead of a snapshot. That would be
-        # really really bad.
-        if name[0] == self.name:
-            raise SnapshotError("\'%s\' is not a valid snapshot name" \
-                                % (self.name))
-        return name[0],name[1]
-
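-    # Illustration (hypothetical name, not part of the original module):
-    # "tank/home@zfs-auto-snap_daily-2014-02-12-12h57" splits into the
-    # fsname "tank/home" and the snaplabel
-    # "zfs-auto-snap_daily-2014-02-12-12h57"; poolname becomes "tank".
-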
-    def get_referenced_size(self):
-        """
-        How much unique storage space is used by this snapshot.
-        Answer in bytes
-        """
-        cmd = [ZFSCMD, "get", "-H", "-p", \
-               "-o", "value", "referenced", \
-               self.name]
-        outdata,errdata = util.run_command(cmd)
-        return long(outdata.rstrip())
-
-    def list_children(self):
-        """Returns a recursive list of child snapshots of this snapshot"""
-        cmd = [ZFSCMD,
-               "list", "-t", "snapshot", "-H", "-r", "-o", "name",
-               self.fsname]
-        outdata,errdata = util.run_command(cmd)
-        result = []
-        for line in outdata.rstrip().split('\n'):
-            if re.search("@%s" % (self.snaplabel), line) and \
-                line != self.name:
-                    result.append(line)
-        return result
-
-    def has_clones(self):
-        """Returns True if the snapshot has any dependent clones"""
-        cmd = [ZFSCMD, "list", "-H", "-o", "origin,name"]
-        outdata,errdata = util.run_command(cmd)
-        for line in outdata.rstrip().split('\n'):
-            details = line.rstrip().split()
-            if details[0] == self.name and \
-                details[1] != '-':
-                return True
-        return False
-
-    def destroy(self, deferred=True):
-        """
-        Permanently remove this snapshot from the filesystem
-        Performs deferred destruction by default.
-        """
-        # Be sure it genuinely exists before trying to destroy it
-        if self.exists() == False:
-            return
-        if deferred == False:
-            cmd = [PFCMD, ZFSCMD, "destroy", self.name]
-        else:
-            cmd = [PFCMD, ZFSCMD, "destroy", "-d", self.name]
-
-        outdata,errdata = util.run_command(cmd)
-        # Clear the global snapshot cache so that a rescan will be
-        # triggered on the next call to Datasets.list_snapshots()
-        self.datasets.refresh_snapshots()
-
-    def hold(self, tag):
-        """
-        Place a hold on the snapshot with the specified "tag" string.
-        """
-        # FIXME - fails if hold is already held
-        # Be sure it genuinely exists before trying to place a hold
-        if self.exists() == False:
-            return
-
-        cmd = [PFCMD, ZFSCMD, "hold", tag, self.name]
-        outdata,errdata = util.run_command(cmd)
-
-    def holds(self):
-        """
-        Returns a list of user hold tags for this snapshot
-        """
-        cmd = [ZFSCMD, "holds", self.name]
-        results = []
-        outdata,errdata = util.run_command(cmd)
-
-        for line in outdata.rstrip().split('\n'):
-            if len(line) == 0:
-                continue
-            # The first line holds the heading columns NAME, TAG and
-            # TIMESTAMP. Filter that line out.
-            line = line.split()
-            if (line[0] != "NAME" and line[1] != "TAG"):
-                results.append(line[1])
-        return results
-
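-    # Illustration (hypothetical output, not part of the original module):
-    # "zfs holds tank/home@snap1" prints a header row plus one row per hold,
-    #
-    #     NAME             TAG   TIMESTAMP
-    #     tank/home@snap1  keep  Wed Feb 12 12:57 2014
-    #
-    # so holds() above would return ["keep"].
-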
-    def release(self, tag):
-        """
-        Release the hold on the snapshot with the specified "tag" string.
-        """
-        # FIXME raises exception if no hold exists.
-        # Be sure it genuinely exists before trying to release it
-        if self.exists() == False:
-            return
-
-        cmd = [PFCMD, ZFSCMD, "release", tag, self.name]
-
-        outdata,errdata = util.run_command(cmd)
-        # Releasing the snapshot might cause it to get automatically
-        # deleted by zfs.
-        # Clear the global snapshot cache so that a rescan will be
-        # triggered on the next call to Datasets.list_snapshots()
-        self.datasets.refresh_snapshots()
-
-
-    def __str__(self):
-        return_string = "Snapshot name: " + self.name
-        return_string = return_string + "\n\tCreation time: " \
-                        + str(self.get_creation_time())
-        return_string = return_string + "\n\tUsed Size: " \
-                        + str(self.get_used_size())
-        return_string = return_string + "\n\tReferenced Size: " \
-                        + str(self.get_referenced_size())
-        return return_string
-
-
-class ReadWritableDataset(ReadableDataset):
-    """
-    Base class for ZFS filesystems and volumes.
-    Provides methods for operations and properties
-    common to both filesystems and volumes.
-    """
-    def __init__(self, name, creation = None):
-        ReadableDataset.__init__(self, name, creation)
-        self.__snapshots = None
-
-    def __str__(self):
-        return_string = "ReadWritableDataset name: " + self.name + "\n"
-        return return_string
-
-    def get_auto_snap(self, schedule = None):
-        if schedule:
-            cmd = [ZFSCMD, "get", "-H", "-o", "value", \
-                   "com.sun:auto-snapshot:%s" % schedule, self.name]
-        else:
-            cmd = [ZFSCMD, "get", "-H", "-o", "value", \
-                   "com.sun:auto-snapshot", self.name]
-        outdata,errdata = util.run_command(cmd)
-        if outdata.rstrip() == "true":
-            return True
-        else:
-            return False
-
-    def get_available_size(self):
-        cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "available", \
-               self.name]
-        outdata,errdata = util.run_command(cmd)
-        return long(outdata.rstrip())
-
-    def create_snapshot(self, snaplabel, recursive = False):
-        """
-        Create a snapshot for the ReadWritable dataset using the supplied
-        snapshot label.
-
-        Keyword Arguments:
-        snaplabel:
-            A string to use as the snapshot label.
-            The bit that comes after the "@" part of the snapshot
-            name.
-        recursive:
-            Recursively snapshot children of this dataset.
-            Default = False
-        """
-        cmd = [PFCMD, ZFSCMD, "snapshot"]
-        if recursive == True:
-            cmd.append("-r")
-        cmd.append("%s@%s" % (self.name, snaplabel))
-        outdata,errdata = util.run_command(cmd, False)
-        if errdata:
-            print errdata
-        self.datasets.refresh_snapshots()
-
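-    # Hypothetical usage sketch (not part of the original module):
-    #
-    #     fs = Filesystem("tank/home")
-    #     fs.create_snapshot("mylabel", recursive = True)
-    #     # creates "tank/home@mylabel" and one snapshot per descendant
-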
-    def list_children(self):
-        
-        # Note: if more dataset types ever come around they will
-        # need to be added to the filesystem,volume args below.
-        # Not for the foreseeable future though.
-        cmd = [ZFSCMD, "list", "-H", "-r", "-t", "filesystem,volume",
-               "-o", "name", self.name]
-        outdata,errdata = util.run_command(cmd)
-        result = []
-        for line in outdata.rstrip().split('\n'):
-            if line.rstrip() != self.name:
-                result.append(line.rstrip())
-        return result
-
-
-    def list_snapshots(self, pattern = None):
-        """
-        List pattern matching snapshots sorted by creation date.
-        Oldest listed first
-           
-        Keyword arguments:
-        pattern -- Filter according to pattern (default None)   
-        """
-        # If there isn't a list of snapshots for this dataset
-        # already, create it now and store it in order to save
-        # time later for potential future invocations.
-        Datasets.snapshotslock.acquire()
-        if Datasets.snapshots == None:
-            self.__snapshots = None
-        Datasets.snapshotslock.release()
-        if self.__snapshots == None:
-            result = []
-            regexpattern = "^%s@" % self.name
-            patternobj = re.compile(regexpattern)
-            for snapname,snaptime in self.datasets.list_snapshots():
-                patternmatchobj = re.match(patternobj, snapname)
-                if patternmatchobj != None:
-                    result.append([snapname, snaptime])
-            # Results already sorted by creation time
-            self.__snapshots = result
-        if pattern == None:
-            return self.__snapshots
-        else:
-            snapshots = []
-            regexpattern = "^%s@.*%s" % (self.name, pattern)
-            patternobj = re.compile(regexpattern)
-            for snapname,snaptime in self.__snapshots:
-                patternmatchobj = re.match(patternobj, snapname)
-                if patternmatchobj != None:
-                    snapshots.append([snapname, snaptime])
-            return snapshots
-
-    def set_auto_snap(self, include, inherit = False):
-        if inherit == True:
-            self.unset_user_property("com.sun:auto-snapshot")
-        else:
-            if include == True:
-                value = "true"
-            else:
-                value = "false"
-            self.set_user_property("com.sun:auto-snapshot", value)
-
-        return
-
-
-class Filesystem(ReadWritableDataset):
-    """ZFS Filesystem class"""
-    def __init__(self, name, mountpoint = None):
-        ReadWritableDataset.__init__(self, name)
-        self.__mountpoint = mountpoint
-
-    def __str__(self):
-        return_string = "Filesystem name: " + self.name + \
-                        "\n\tMountpoint: " + self.get_mountpoint() + \
-                        "\n\tMounted: " + str(self.is_mounted()) + \
-                        "\n\tAuto snap: " + str(self.get_auto_snap())
-        return return_string
-
-    def get_mountpoint(self):
-        if (self.__mountpoint == None):
-            cmd = [ZFSCMD, "get", "-H", "-o", "value", "mountpoint", \
-                   self.name]
-            outdata,errdata = util.run_command(cmd)
-            result = outdata.rstrip()
-            self.__mountpoint = result
-        return self.__mountpoint
-
-    def is_mounted(self):
-        cmd = [ZFSCMD, "get", "-H", "-o", "value", "mounted", \
-               self.name]
-        outdata,errdata = util.run_command(cmd)
-        result = outdata.rstrip()
-        if result == "yes":
-            return True
-        else:
-            return False
-
-    def list_children(self):
-        cmd = [ZFSCMD, "list", "-H", "-r", "-t", "filesystem", "-o", "name",
-               self.name]
-        outdata,errdata = util.run_command(cmd)
-        result = []
-        for line in outdata.rstrip().split('\n'):
-            if line.rstrip() != self.name:
-                result.append(line.rstrip())
-        return result
-
-
-class Volume(ReadWritableDataset):
-    """
-    ZFS Volume Class
-    This is basically just a stub and does nothing
-    unique from ReadWritableDataset parent class.
-    """
-    def __init__(self, name):
-        ReadWritableDataset.__init__(self, name)
-
-    def __str__(self):
-        return_string = "Volume name: " + self.name + "\n"
-        return return_string
-
-
-class ZFSError(Exception):
-    """Generic base class for ZPoolFaultedError and SnapshotError
-
-    Attributes:
-        msg -- explanation of the error
-    """
-    def __init__(self, msg):
-        self.msg = msg
-    def __str__(self):
-        return repr(self.msg)
-
-
-class ZPoolFaultedError(ZFSError):
-    """Exception raised for queries made against ZPools that
-       are in a FAULTED state
-
-    Attributes:
-        msg -- explanation of the error
-    """
-    def __init__(self, msg):
-        ZFSError.__init__(self, msg)
-
-
-class SnapshotError(ZFSError):
-    """Exception raised for invalid snapshot names provided to
-       Snapshot() constructor.
-
-    Attributes:
-        msg -- explanation of the error
-    """
-    def __init__(self, msg):
-        ZFSError.__init__(self, msg)
-
-
-def list_zpools():
-    """Returns a list of all zpools on the system"""
-    result = []
-    cmd = [ZPOOLCMD, "list", "-H", "-o", "name"]
-    outdata,errdata = util.run_command(cmd)
-    for line in outdata.rstrip().split('\n'):
-        result.append(line.rstrip())
-    return result
-
-
-if __name__ == "__main__":
-    for zpool in list_zpools():
-        pool = ZPool(zpool)
-        print pool
-        for filesys,mountpoint in pool.list_filesystems():
-            fs = Filesystem(filesys, mountpoint)
-            print fs
-            print "\tSnapshots:"
-            for snapshot, snaptime in fs.list_snapshots():
-                snap = Snapshot(snapshot, snaptime)
-                print "\t\t" + snap.name
-
-        for volname in pool.list_volumes():
-            vol = Volume(volname)
-            print vol
-            print "\tSnapshots:"
-            for snapshot, snaptime in vol.list_snapshots():
-                snap = Snapshot(snapshot, snaptime)
-                print "\t\t" + snap.name
-