Initial attempt at python3 conversion 0.2.98.python3.1
author: Ralf Ertzinger <ralf@skytale.net>
Sat, 9 Nov 2019 11:43:46 +0000 (11:43 +0000)
committer: Ralf Ertzinger <ralf@skytale.net>
Sat, 9 Nov 2019 11:43:46 +0000 (11:43 +0000)
25 files changed:
.gitignore
usr/share/time-slider/lib/plugin/plugin.py
usr/share/time-slider/lib/plugin/rsync/backup.py
usr/share/time-slider/lib/plugin/rsync/trigger.py
usr/share/time-slider/lib/plugin/zfssend/zfssend.py
usr/share/time-slider/lib/time_slider/__init__.py
usr/share/time-slider/lib/time_slider/applet.py
usr/share/time-slider/lib/time_slider/autosnapsmf.py
usr/share/time-slider/lib/time_slider/deletegui.py
usr/share/time-slider/lib/time_slider/fileversion.py
usr/share/time-slider/lib/time_slider/linux/autosnapsmf.py
usr/share/time-slider/lib/time_slider/linux/rbac.py
usr/share/time-slider/lib/time_slider/linux/smf.py
usr/share/time-slider/lib/time_slider/linux/timesliderconfig.py
usr/share/time-slider/lib/time_slider/linux/timeslidersmf.py
usr/share/time-slider/lib/time_slider/rbac.py
usr/share/time-slider/lib/time_slider/setupgui.py
usr/share/time-slider/lib/time_slider/smf.py
usr/share/time-slider/lib/time_slider/snapnowui.py
usr/share/time-slider/lib/time_slider/timesliderd.py
usr/share/time-slider/lib/time_slider/timeslidersmf.py
usr/share/time-slider/lib/time_slider/tmp.py
usr/share/time-slider/lib/time_slider/tmp2.py
usr/share/time-slider/lib/time_slider/util.py
usr/share/time-slider/lib/time_slider/zfs.py

index 51fa109..1a3a326 100644 (file)
@@ -1,3 +1,4 @@
 *.pyc
 *.rpm
 *.tar.bz2
 *.pyc
 *.rpm
 *.tar.bz2
+.mypy_cache/
index 7d1e3d1..e509b19 100644 (file)
@@ -23,7 +23,7 @@
 import os
 import sys
 import subprocess
 import os
 import sys
 import subprocess
-import pluginsmf
+from . import pluginsmf
 
 from time_slider import smf, autosnapsmf, util
 
 
 from time_slider import smf, autosnapsmf, util
 
@@ -46,15 +46,15 @@ class Plugin(Exception):
         command = self.smfInst.get_trigger_command()
         try:
             statinfo = os.stat(command)
         command = self.smfInst.get_trigger_command()
         try:
             statinfo = os.stat(command)
-            other_x = (statinfo.st_mode & 01)
+            other_x = (statinfo.st_mode & 0o1)
             if other_x == 0:
             if other_x == 0:
-              raise RuntimeError'Plugin: %s:\nConfigured trigger command is not ' \
+              raise RuntimeError('Plugin: %s:\nConfigured trigger command is not ' \
                                   'executable:\n%s' \
                                   'executable:\n%s' \
-                                  % (self.smfInst.instanceName, command)  
+                                  % (self.smfInst.instanceName, command))  
         except OSError:
         except OSError:
-            raise RuntimeError'Plugin: %s:\nCan not access the configured ' \
+            raise RuntimeError('Plugin: %s:\nCan not access the configured ' \
                                 'plugin/trigger_command:\n%s' \
                                 'plugin/trigger_command:\n%s' \
-                                % (self.smfInst.instanceName, command)      
+                                % (self.smfInst.instanceName, command))      
 
 
     def execute(self, schedule, label):
 
 
     def execute(self, schedule, label):
@@ -96,9 +96,9 @@ class Plugin(Exception):
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE,
                                           close_fds=True)
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE,
                                           close_fds=True)
-        except OSError, message:
-            raise RuntimeError"%s subprocess error:\n %s" % \
-                                (cmd, str(message))
+        except OSError as message:
+            raise RuntimeError("%s subprocess error:\n %s" % \
+                                (cmd, str(message)))
             self._proc = None
 
     def is_running(self):
             self._proc = None
 
     def is_running(self):
@@ -142,8 +142,8 @@ class PluginManager():
         err = p.wait()
         if err != 0:
             self._refreshLock.release()
         err = p.wait()
         if err != 0:
             self._refreshLock.release()
-            raise RuntimeError'%s failed with exit code %d\n%s' % \
-                                (str(cmd), err, errdata)
+            raise RuntimeError('%s failed with exit code %d\n%s' % \
+                                (str(cmd), err, errdata))
         for line in outdata.rstrip().split('\n'):
             line = line.rstrip().split()
             state = line[0]
         for line in outdata.rstrip().split('\n'):
             line = line.rstrip().split()
             state = line[0]
@@ -158,7 +158,7 @@ class PluginManager():
                 try:
                     plugin = Plugin(fmri, self.verbose)
                     self.plugins.append(plugin)
                 try:
                     plugin = Plugin(fmri, self.verbose)
                     self.plugins.append(plugin)
-                except RuntimeError, message:
+                except RuntimeError as message:
                     sys.stderr.write("Ignoring misconfigured plugin: %s\n" \
                                      % (fmri))
                     sys.stderr.write("Reason:\n%s\n" % (message))
                     sys.stderr.write("Ignoring misconfigured plugin: %s\n" \
                                      % (fmri))
                     sys.stderr.write("Reason:\n%s\n" % (message))
index 2d1779f..0b403cc 100644 (file)
@@ -39,7 +39,7 @@ import copy
 from bisect import insort, bisect_left
 
 from time_slider import util, zfs, dbussvc, autosnapsmf, timeslidersmf
 from bisect import insort, bisect_left
 
 from time_slider import util, zfs, dbussvc, autosnapsmf, timeslidersmf
-import rsyncsmf
+from . import rsyncsmf
 
 
 # Set to True if SMF property value of "plugin/command" is "true"
 
 
 # Set to True if SMF property value of "plugin/command" is "true"
@@ -408,7 +408,7 @@ class BackupQueue():
                         not os.path.islink(d)]
             for d in dirList:
                 mtime = os.stat(d).st_mtime
                         not os.path.islink(d)]
             for d in dirList:
                 mtime = os.stat(d).st_mtime
-                insort(self._backups, [long(mtime), os.path.abspath(d)])
+                insort(self._backups, [int(mtime), os.path.abspath(d)])
                 self._backupTimes[dirName][d] = mtime
 
     def _find_backup_device(self):
                 self._backupTimes[dirName][d] = mtime
 
     def _find_backup_device(self):
@@ -599,7 +599,7 @@ class BackupQueue():
             lockFile = os.path.join(lockFileDir, tail)
 
             if not os.path.exists(lockFileDir):
             lockFile = os.path.join(lockFileDir, tail)
 
             if not os.path.exists(lockFileDir):
-                os.makedirs(lockFileDir, 0755)
+                os.makedirs(lockFileDir, 0o755)
             try:
                 lockFp = open(lockFile, 'w')
                 fcntl.flock(lockFp, fcntl.LOCK_EX | fcntl.LOCK_NB)
             try:
                 lockFp = open(lockFile, 'w')
                 fcntl.flock(lockFp, fcntl.LOCK_EX | fcntl.LOCK_NB)
@@ -620,7 +620,7 @@ class BackupQueue():
             trashDir = os.path.join(trash, tail)
 
             if not os.path.exists(trash):
             trashDir = os.path.join(trash, tail)
 
             if not os.path.exists(trash):
-                os.makedirs(trash, 0755)
+                os.makedirs(trash, 0o755)
 
             util.debug("Deleting rsync backup to recover space: %s"\
                 % (dirName), self._verbose)
 
             util.debug("Deleting rsync backup to recover space: %s"\
                 % (dirName), self._verbose)
@@ -763,7 +763,7 @@ class BackupQueue():
             self._bus.rsync_started(self._rsyncBaseDir)
 
         ctime,snapName = self._currentQueueSet[0]
             self._bus.rsync_started(self._rsyncBaseDir)
 
         ctime,snapName = self._currentQueueSet[0]
-        snapshot = zfs.Snapshot(snapName, long(ctime))
+        snapshot = zfs.Snapshot(snapName, int(ctime))
         # Make sure the snapshot didn't get destroyed since we last
         # checked it.
         remainingList = self._currentQueueSet[1:]
         # Make sure the snapshot didn't get destroyed since we last
         # checked it.
         remainingList = self._currentQueueSet[1:]
@@ -835,18 +835,18 @@ class BackupQueue():
         dirList = []
 
         if not os.path.exists(partialDir):
         dirList = []
 
         if not os.path.exists(partialDir):
-            os.makedirs(partialDir, 0755)
+            os.makedirs(partialDir, 0o755)
         if not os.path.exists(logDir):
         if not os.path.exists(logDir):
-            os.makedirs(logDir, 0755)
+            os.makedirs(logDir, 0o755)
 
         if not os.path.exists(targetDir):
 
         if not os.path.exists(targetDir):
-            os.makedirs(targetDir, 0755)
+            os.makedirs(targetDir, 0o755)
             # Add the new directory to our internal
             # mtime dictionary and sorted list.
             self._backupTimes[targetDir] = {}
             insort(self._backupDirs, targetDir)
         else:
             # Add the new directory to our internal
             # mtime dictionary and sorted list.
             self._backupTimes[targetDir] = {}
             insort(self._backupDirs, targetDir)
         else:
-            for name,value in self._backupTimes[targetDir].items():
+            for name,value in list(self._backupTimes[targetDir].items()):
                 if ctime > value:
                     if nearestOlder == None or \
                        value > nearestOlder[1]:
                 if ctime > value:
                     if nearestOlder == None or \
                        value > nearestOlder[1]:
@@ -876,7 +876,7 @@ class BackupQueue():
                                     link + ".lock")
 
             if not os.path.exists(lockFileDir):
                                     link + ".lock")
 
             if not os.path.exists(lockFileDir):
-                os.makedirs(lockFileDir, 0755)
+                os.makedirs(lockFileDir, 0o755)
 
             try:
                 lockFp = open(lockFile, 'w')
 
             try:
                 lockFp = open(lockFile, 'w')
@@ -902,7 +902,7 @@ class BackupQueue():
         # Set umask temporarily so that rsync backups are read-only to
         # the owner by default. Rync will override this to match the
         # permissions of each snapshot as appropriate.
         # Set umask temporarily so that rsync backups are read-only to
         # the owner by default. Rync will override this to match the
         # permissions of each snapshot as appropriate.
-        origmask = os.umask(0222)
+        origmask = os.umask(0o222)
         util.debug("Starting rsync backup of '%s' to: %s" \
                    % (sourceDir, partialDir),
                    self._verbose)
         util.debug("Starting rsync backup of '%s' to: %s" \
                    % (sourceDir, partialDir),
                    self._verbose)
@@ -977,10 +977,10 @@ class BackupQueue():
         # they match the snapshot creation time. This is extremely important
         # because the backup mechanism relies on it to determine backup times
         # and nearest matches for incremental rsync (linkDest)
         # they match the snapshot creation time. This is extremely important
         # because the backup mechanism relies on it to determine backup times
         # and nearest matches for incremental rsync (linkDest)
-        os.utime(backupDir, (long(ctime), long(ctime)))
+        os.utime(backupDir, (int(ctime), int(ctime)))
         # Update the dictionary and time sorted list with ctime also
         # Update the dictionary and time sorted list with ctime also
-        self._backupTimes[targetDir][snapshot.snaplabel] = long(ctime)
-        insort(self._backups, [long(ctime), os.path.abspath(backupDir)]) 
+        self._backupTimes[targetDir][snapshot.snaplabel] = int(ctime)
+        insort(self._backups, [int(ctime), os.path.abspath(backupDir)]) 
         snapshot.set_user_property(self._propName, "completed")
         snapshot.release(self._propName)
         self._currentQueueSet = remainingList
         snapshot.set_user_property(self._propName, "completed")
         snapshot.release(self._propName)
         self._currentQueueSet = remainingList
@@ -1027,7 +1027,7 @@ class BackupQueue():
                                  snapshot.fsname,
                                  rsyncsmf.RSYNCTRASHSUFFIX)
             if not os.path.exists(trash):
                                  snapshot.fsname,
                                  rsyncsmf.RSYNCTRASHSUFFIX)
             if not os.path.exists(trash):
-                os.makedirs(trash, 0755)
+                os.makedirs(trash, 0o755)
             for mtime,dirName in purgeList:
                 trashDir = os.path.join(trash,
                                         dirName)
             for mtime,dirName in purgeList:
                 trashDir = os.path.join(trash,
                                         dirName)
@@ -1042,7 +1042,7 @@ class BackupQueue():
                                             dirName + ".lock")
 
                     if not os.path.exists(lockFileDir):
                                             dirName + ".lock")
 
                     if not os.path.exists(lockFileDir):
-                        os.makedirs(lockFileDir, 0755)
+                        os.makedirs(lockFileDir, 0o755)
 
                     try:
                         lockFp = open(lockFile, 'w')
 
                     try:
                         lockFp = open(lockFile, 'w')
@@ -1195,7 +1195,7 @@ def list_pending_snapshots(propName):
     outdata,errdata = util.run_command(cmd)
     for line in outdata.rstrip().split('\n'):
         ctimeStr,name = line.split()
     outdata,errdata = util.run_command(cmd)
     for line in outdata.rstrip().split('\n'):
         ctimeStr,name = line.split()
-        insort(sortsnaplist, tuple((long(ctimeStr), name)))
+        insort(sortsnaplist, tuple((int(ctimeStr), name)))
     sortsnaplist.reverse()
     return sortsnaplist
 
     sortsnaplist.reverse()
     return sortsnaplist
 
@@ -1217,7 +1217,7 @@ def main(argv):
     lockFileDir = os.path.normpath(tempfile.gettempdir() + '/' + \
                                                        ".time-slider")
     if not os.path.exists(lockFileDir):
     lockFileDir = os.path.normpath(tempfile.gettempdir() + '/' + \
                                                        ".time-slider")
     if not os.path.exists(lockFileDir):
-            os.makedirs(lockFileDir, 0755)
+            os.makedirs(lockFileDir, 0o755)
     lockFile = os.path.join(lockFileDir, 'rsync-backup.lock')
 
     lockFp = open(lockFile, 'w')
     lockFile = os.path.join(lockFileDir, 'rsync-backup.lock')
 
     lockFp = open(lockFile, 'w')
index 3904eed..6837d87 100644 (file)
@@ -25,7 +25,7 @@ import sys
 import subprocess
 import syslog
 
 import subprocess
 import syslog
 
-import rsyncsmf
+from . import rsyncsmf
 from time_slider import util, smf, zfs
 
 # Set to True if SMF property value of "plugin/command" is "true"
 from time_slider import util, smf, zfs
 
 # Set to True if SMF property value of "plugin/command" is "true"
index 0490615..12e1ff5 100644 (file)
@@ -33,7 +33,7 @@ import time_slider.zfs
 # Set to True if SMF property value of "plugin/command" is "true"
 verboseprop = "plugin/verbose"
 propbasename = "org.opensolaris:time-slider-plugin"
 # Set to True if SMF property value of "plugin/command" is "true"
 verboseprop = "plugin/verbose"
 propbasename = "org.opensolaris:time-slider-plugin"
-print _("Do I work?")
+print(_("Do I work?"))
 
 def main(argv):
 
 
 def main(argv):
 
@@ -129,7 +129,7 @@ def main(argv):
     # Check to see if the receive command is accessible and executable
     try:
         statinfo = os.stat(recvcmd[0])
     # Check to see if the receive command is accessible and executable
     try:
         statinfo = os.stat(recvcmd[0])
-        other_x = (statinfo.st_mode & 01)
+        other_x = (statinfo.st_mode & 0o1)
         if other_x == 0:
             log_error(syslog.LOG_ERR,
                       "Plugin: %s: Configured receive/command is not " \
         if other_x == 0:
             log_error(syslog.LOG_ERR,
                       "Plugin: %s: Configured receive/command is not " \
@@ -193,13 +193,13 @@ def main(argv):
             senderrno = sendP.wait()
 
             if senderrno != 0:
             senderrno = sendP.wait()
 
             if senderrno != 0:
-                raise RuntimeError"Send command: %s failed with exit code" \
+                raise RuntimeError("Send command: %s failed with exit code" \
                                     "%d. Error message: \n%s" \
                                     "%d. Error message: \n%s" \
-                                    % (str(sendcmd), senderrno, senderr)
+                                    % (str(sendcmd), senderrno, senderr))
             if recverrno != 0:
             if recverrno != 0:
-                raise RuntimeError"Receive command %s failed with exit " \
+                raise RuntimeError("Receive command %s failed with exit " \
                                     "code %d. Error message: \n%s" \
                                     "code %d. Error message: \n%s" \
-                                    % (str(recvcmd), recverrno, recverr)
+                                    % (str(recvcmd), recverrno, recverr))
 
             if prevsnapname != None:
                 util.debug("Releasing hold on %s" % (prevsnapname), verbose)
 
             if prevsnapname != None:
                 util.debug("Releasing hold on %s" % (prevsnapname), verbose)
@@ -208,7 +208,7 @@ def main(argv):
                       % (prevsnapname),
                       verbose)
                 snapshot.release(propname)
                       % (prevsnapname),
                       verbose)
                 snapshot.release(propname)
-        except Exception, message:
+        except Exception as message:
             log_error(syslog.LOG_ERR,
                       "Error during snapshot send/receive operation: %s" \
                       % (message))
             log_error(syslog.LOG_ERR,
                       "Error during snapshot send/receive operation: %s" \
                       % (message))
index 4d7d17c..11a0ccb 100755 (executable)
@@ -45,8 +45,8 @@ gettext.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
 gettext.textdomain(GETTEXT_DOMAIN)
 
 # register the gettext function for the whole interpreter as "_"
 gettext.textdomain(GETTEXT_DOMAIN)
 
 # register the gettext function for the whole interpreter as "_"
-import __builtin__
-__builtin__._ = gettext.gettext
+import builtins
+builtins._ = gettext.gettext
 
 
 
 
 
 
index 1fc5cee..0ea88ff 100755 (executable)
@@ -638,7 +638,7 @@ def main(argv):
     try:
         mainloop.run()
     except:
     try:
         mainloop.run()
     except:
-        print "Exiting"
+        print("Exiting")
 
 if __name__ == '__main__':
     main()
 
 if __name__ == '__main__':
     main()
index 7a15529..5999955 100755 (executable)
@@ -21,8 +21,8 @@
 #
 
 import threading
 #
 
 import threading
-import smf
-import util
+from . import smf
+from . import util
 
 factoryDefaultSchedules = ("monthly", "weekly", "daily", "hourly", "frequent")
 
 
 factoryDefaultSchedules = ("monthly", "weekly", "daily", "hourly", "frequent")
 
@@ -50,9 +50,9 @@ class AutoSnap(smf.SMFInstance):
             period = int(self.get_prop(ZFSPROPGROUP, "period"))
             keep =  int(self.get_prop(ZFSPROPGROUP, "keep"))
 
             period = int(self.get_prop(ZFSPROPGROUP, "period"))
             keep =  int(self.get_prop(ZFSPROPGROUP, "keep"))
 
-        except OSError, message:
-            raise RuntimeError"%s subprocess error:\n %s" % \
-                                (cmd, str(message))
+        except OSError as message:
+            raise RuntimeError("%s subprocess error:\n %s" % \
+                                (cmd, str(message)))
         finally:
             _scheddetaillock.release()
       
         finally:
             _scheddetaillock.release()
       
@@ -119,11 +119,11 @@ def get_default_schedules():
             instance = AutoSnap(s)
             try:
                 _defaultSchedules.append(instance.get_schedule_details())
             instance = AutoSnap(s)
             try:
                 _defaultSchedules.append(instance.get_schedule_details())
-            except RuntimeError, message:
-                raise RuntimeError"Error getting schedule details for " + \
+            except RuntimeError as message:
+                raise RuntimeError("Error getting schedule details for " + \
                                     "default auto-snapshot SMF instance:" + \
                                     "\n\t" + instanceName + "\nDetails:\n" + \
                                     "default auto-snapshot SMF instance:" + \
                                     "\n\t" + instanceName + "\nDetails:\n" + \
-                                    str(message)
+                                    str(message))
     return _defaultSchedules
 
 def get_custom_schedules():
     return _defaultSchedules
 
 def get_custom_schedules():
@@ -154,11 +154,11 @@ def get_custom_schedules():
                 instance = AutoSnap(label)
                 try:
                     _customSchedules.append(instance.get_schedule_details())
                 instance = AutoSnap(label)
                 try:
                     _customSchedules.append(instance.get_schedule_details())
-                except RuntimeError, message:
-                    raise RuntimeError"Error getting schedule details " + \
+                except RuntimeError as message:
+                    raise RuntimeError("Error getting schedule details " + \
                                         "for custom auto-snapshot SMF " + \
                                         "instance:\n\t" + label + "\n" + \
                                         "for custom auto-snapshot SMF " + \
                                         "instance:\n\t" + label + "\n" + \
-                                        "Details:\n" + str(message) 
+                                        "Details:\n" + str(message)) 
     return _customSchedules
 
 
     return _customSchedules
 
 
@@ -166,5 +166,5 @@ if __name__ == "__main__":
     defaults = get_default_schedules()
     for sched in defaults:
         S = AutoSnap(sched[0])
     defaults = get_default_schedules()
     for sched in defaults:
         S = AutoSnap(sched[0])
-        print S.get_schedule_details()
+        print(S.get_schedule_details())
 
 
index f619b3e..d9dfe63 100755 (executable)
@@ -73,8 +73,8 @@ GETTEXT_DOMAIN = 'time-slider'
 gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
 gtk.glade.textdomain(GETTEXT_DOMAIN)
 
 gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
 gtk.glade.textdomain(GETTEXT_DOMAIN)
 
-import zfs
-from rbac import RBACprofile
+from . import zfs
+from .rbac import RBACprofile
 
 class RsyncBackup:
 
 
 class RsyncBackup:
 
@@ -91,7 +91,7 @@ class RsyncBackup:
             self.creationtime = creationtime
             try:
                 tm = time.localtime(self.creationtime)
             self.creationtime = creationtime
             try:
                 tm = time.localtime(self.creationtime)
-                self.creationtime_str = unicode(time.strftime ("%c", tm),
+                self.creationtime_str = str(time.strftime ("%c", tm),
                            locale.getpreferredencoding()).encode('utf-8')
             except:
                 self.creationtime_str = time.ctime(self.creationtime)
                            locale.getpreferredencoding()).encode('utf-8')
             except:
                 self.creationtime_str = time.ctime(self.creationtime)
@@ -133,22 +133,21 @@ class RsyncBackup:
                              rsyncsmf.RSYNCLOCKSUFFIX)
 
         if not os.path.exists(lockFileDir):
                              rsyncsmf.RSYNCLOCKSUFFIX)
 
         if not os.path.exists(lockFileDir):
-            os.makedirs(lockFileDir, 0755)
+            os.makedirs(lockFileDir, 0o755)
 
         lockFile = os.path.join(lockFileDir, self.snaplabel + ".lock")
         try:
             lockFp = open(lockFile, 'w')
             fcntl.flock(lockFp, fcntl.LOCK_EX | fcntl.LOCK_NB)
         except IOError:
 
         lockFile = os.path.join(lockFileDir, self.snaplabel + ".lock")
         try:
             lockFp = open(lockFile, 'w')
             fcntl.flock(lockFp, fcntl.LOCK_EX | fcntl.LOCK_NB)
         except IOError:
-            raise RuntimeError, \
-            "couldn't delete %s, already used by another process" % self.mountpoint
+            raise RuntimeError("couldn't delete %s, already used by another process" % self.mountpoint)
             return
 
         trashDir = os.path.join(self.rsync_dir,
                           self.fsname,
                           rsyncsmf.RSYNCTRASHSUFFIX)
         if not os.path.exists(trashDir):
             return
 
         trashDir = os.path.join(self.rsync_dir,
                           self.fsname,
                           rsyncsmf.RSYNCTRASHSUFFIX)
         if not os.path.exists(trashDir):
-            os.makedirs(trashDir, 0755)
+            os.makedirs(trashDir, 0o755)
 
         backupTrashDir = os.path.join (self.rsync_dir,
                                  self.fsname,
 
         backupTrashDir = os.path.join (self.rsync_dir,
                                  self.fsname,
@@ -207,7 +206,7 @@ class DeleteSnapManager:
     def initialise_view(self):
         if len(self.shortcircuit) == 0:
             # Set TreeViews
     def initialise_view(self):
         if len(self.shortcircuit) == 0:
             # Set TreeViews
-            self.liststorefs = gtk.ListStore(str, str, str, str, str, long,
+            self.liststorefs = gtk.ListStore(str, str, str, str, str, int,
                                              gobject.TYPE_PYOBJECT)
             list_filter = self.liststorefs.filter_new()
             list_sort = gtk.TreeModelSort(list_filter)
                                              gobject.TYPE_PYOBJECT)
             list_filter = self.liststorefs.filter_new()
             list_sort = gtk.TreeModelSort(list_filter)
@@ -441,7 +440,7 @@ class DeleteSnapManager:
         for snapshot in newlist:
             try:
                 tm = time.localtime(snapshot.get_creation_time())
         for snapshot in newlist:
             try:
                 tm = time.localtime(snapshot.get_creation_time())
-                t = unicode(time.strftime ("%c", tm),
+                t = str(time.strftime ("%c", tm),
                     locale.getpreferredencoding()).encode('utf-8')
             except:
                 t = time.ctime(snapshot.get_creation_time())
                     locale.getpreferredencoding()).encode('utf-8')
             except:
                 t = time.ctime(snapshot.get_creation_time())
@@ -711,7 +710,7 @@ class DeleteSnapshots(threading.Thread):
             if backup.exists():
                 try:
                     backup.destroy ()
             if backup.exists():
                 try:
                     backup.destroy ()
-                except RuntimeError, inst:
+                except RuntimeError as inst:
                     self.errors.append(str(inst))
             deleted += 1
             self.progress = deleted / (total * 1.0)
                     self.errors.append(str(inst))
             deleted += 1
             self.progress = deleted / (total * 1.0)
@@ -752,5 +751,5 @@ def main(argv):
                                        "administrative priviliges."
                                        "\n\nConsult your system administrator "))
         dialog.run()
                                        "administrative priviliges."
                                        "\n\nConsult your system administrator "))
         dialog.run()
-        print argv + "is not a valid executable path"
+        print(argv + "is not a valid executable path")
         sys.exit(1)
         sys.exit(1)
index 0744be5..3702f12 100755 (executable)
@@ -132,7 +132,7 @@ class File:
   COLUMN_STRING_DATE,
   COLUMN_DATE,
   COLUMN_SIZE
   COLUMN_STRING_DATE,
   COLUMN_DATE,
   COLUMN_SIZE
-) = range (5)
+) = list(range(5))
 
 
 
 
 
 
index 3152cf9..79b5000 100755 (executable)
@@ -21,8 +21,8 @@
 #
 
 import time_slider.autosnapsmf as base
 #
 
 import time_slider.autosnapsmf as base
-import smf
-from timesliderconfig import Config
+from . import smf
+from .timesliderconfig import Config
 
 SNAPLABELPREFIX = base.SNAPLABELPREFIX
 
 
 SNAPLABELPREFIX = base.SNAPLABELPREFIX
 
@@ -49,11 +49,11 @@ def get_default_schedules():
             instance = AutoSnap(s)
             try:
                 _defaultSchedules.append(instance.get_schedule_details())
             instance = AutoSnap(s)
             try:
                 _defaultSchedules.append(instance.get_schedule_details())
-            except RuntimeError, message:
-                raise RuntimeError"Error getting schedule details for " + \
+            except RuntimeError as message:
+                raise RuntimeError("Error getting schedule details for " + \
                                     "default auto-snapshot SMF instance:" + \
                                     "\n\t" + instanceName + "\nDetails:\n" + \
                                     "default auto-snapshot SMF instance:" + \
                                     "\n\t" + instanceName + "\nDetails:\n" + \
-                                    str(message)
+                                    str(message))
     return _defaultSchedules
 
 
     return _defaultSchedules
 
 
@@ -79,11 +79,11 @@ def get_custom_schedules():
                     instance = AutoSnap(label)
                     try:
                         _customSchedules.append(instance.get_schedule_details())
                     instance = AutoSnap(label)
                     try:
                         _customSchedules.append(instance.get_schedule_details())
-                    except RuntimeError, message:
-                        raise RuntimeError"Error getting schedule details " + \
+                    except RuntimeError as message:
+                        raise RuntimeError("Error getting schedule details " + \
                                             "for custom auto-snapshot SMF " + \
                                             "instance:\n\t" + label + "\n" + \
                                             "for custom auto-snapshot SMF " + \
                                             "instance:\n\t" + label + "\n" + \
-                                            "Details:\n" + str(message)
+                                            "Details:\n" + str(message))
     return _customSchedules
 
 class AutoSnap(base.AutoSnap):
     return _customSchedules
 
 class AutoSnap(base.AutoSnap):
@@ -126,4 +126,4 @@ if __name__ == "__main__":
     defaults = get_default_schedules()
     for sched in defaults:
         S = AutoSnap(sched[0])
     defaults = get_default_schedules()
     for sched in defaults:
         S = AutoSnap(sched[0])
-        print S.get_schedule_details()
+        print(S.get_schedule_details())
index 64c7428..e369032 100755 (executable)
@@ -51,8 +51,8 @@ class RBACprofile(base.RBACprofile):
 
 if __name__ == "__main__":
   rbac = RBACprofile()
 
 if __name__ == "__main__":
   rbac = RBACprofile()
-  print rbac.name
-  print rbac.uid
-  print rbac.profiles
-  print rbac.auths
+  print(rbac.name)
+  print(rbac.uid)
+  print(rbac.profiles)
+  print(rbac.auths)
 
 
index 748ca22..1ecc3fb 100644 (file)
@@ -20,7 +20,7 @@
 # CDDL HEADER END
 #
 
 # CDDL HEADER END
 #
 
-import timesliderconfig
+from . import timesliderconfig
 import time_slider.smf as base
 
 class SMFInstance(base.SMFInstance):
 import time_slider.smf as base
 
 class SMFInstance(base.SMFInstance):
@@ -41,5 +41,5 @@ class SMFInstance(base.SMFInstance):
 
 if __name__ == "__main__":
   S = SMFInstance('svc:/application/time-slider')
 
 if __name__ == "__main__":
   S = SMFInstance('svc:/application/time-slider')
-  print S
+  print(S)
 
 
index e681dee..b656f73 100644 (file)
@@ -20,7 +20,7 @@
 # CDDL HEADER END
 #
 
 # CDDL HEADER END
 #
 
-import ConfigParser
+import configparser
 import sys
 import time_slider.util as util
 
 import sys
 import time_slider.util as util
 
@@ -69,14 +69,14 @@ default_properties = {
     },
 }
 
     },
 }
 
-class MyConfigParser(ConfigParser.ConfigParser):
+class MyConfigParser(configparser.ConfigParser):
     def __init__(self):
     def __init__(self):
-        ConfigParser.ConfigParser.__init__(self)
+        configparser.ConfigParser.__init__(self)
 
 
-        for section, content in default_properties.iteritems():
+        for section, content in default_properties.items():
             if not self.has_section(section):
                 self.add_section(section)
             if not self.has_section(section):
                 self.add_section(section)
-            for k,v in content.iteritems():
+            for k,v in content.items():
                 self.set(section, k, str(v))
 
 class Config:
                 self.set(section, k, str(v))
 
 class Config:
@@ -89,7 +89,7 @@ class Config:
             result = self.config.get(section, option)
             util.debug('CONFIG: GET section %s, option %s with value %s\n' % (section, option, result), 1)
             return result
             result = self.config.get(section, option)
             util.debug('CONFIG: GET section %s, option %s with value %s\n' % (section, option, result), 1)
             return result
-        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+        except (configparser.NoOptionError, configparser.NoSectionError):
             util.debug('CONFIG: NOTFOUND section %s, option %s\n' % (section, option), 1)
             return ''
 
             util.debug('CONFIG: NOTFOUND section %s, option %s\n' % (section, option), 1)
             return ''
 
index 64f3a5b..0c5febf 100755 (executable)
@@ -21,7 +21,7 @@
 #
 
 import time_slider.timeslidersmf as base
 #
 
 import time_slider.timeslidersmf as base
-import smf
+from . import smf
 import threading
 
 class TimeSliderSMF(base.TimeSliderSMF):
 import threading
 
 class TimeSliderSMF(base.TimeSliderSMF):
@@ -62,4 +62,4 @@ base.TimeSliderSMF.__bases__ = (smf.SMFInstance,)
 
 if __name__ == "__main__":
   S = TimeSliderSMF('svc:/application/time-slider')
 
 if __name__ == "__main__":
   S = TimeSliderSMF('svc:/application/time-slider')
-  print S
+  print(S)
index cb59d54..0af6a44 100755 (executable)
@@ -23,7 +23,7 @@
 import os
 import pwd
 
 import os
 import pwd
 
-import util
+from . import util
 
 class RBACprofile:
 
 
 class RBACprofile:
 
@@ -111,8 +111,8 @@ class RBACprofile:
 
 if __name__ == "__main__":
   rbac = RBACprofile()
 
 if __name__ == "__main__":
   rbac = RBACprofile()
-  print rbac.name
-  print rbac.uid
-  print rbac.profiles
-  print rbac.auths
+  print(rbac.name)
+  print(rbac.uid)
+  print(rbac.profiles)
+  print(rbac.auths)
 
 
index 12451a8..e54496b 100755 (executable)
@@ -24,9 +24,9 @@ import sys
 import os
 import subprocess
 import threading
 import os
 import subprocess
 import threading
-import util
-import smf
-from autosnapsmf import enable_default_schedules, disable_default_schedules
+from . import util
+from . import smf
+from .autosnapsmf import enable_default_schedules, disable_default_schedules
 
 from os.path import abspath, dirname, join, pardir
 sys.path.insert(0, join(dirname(__file__), pardir, "plugin"))
 
 from os.path import abspath, dirname, join, pardir
 sys.path.insert(0, join(dirname(__file__), pardir, "plugin"))
@@ -53,7 +53,7 @@ import dbus
 import dbus.service
 import dbus.mainloop
 import dbus.mainloop.glib
 import dbus.service
 import dbus.mainloop
 import dbus.mainloop.glib
-import dbussvc
+from . import dbussvc
 
 
 # This is the rough guess ratio used for rsync backup device size
 
 
 # This is the rough guess ratio used for rsync backup device size
@@ -79,9 +79,9 @@ GETTEXT_DOMAIN = 'time-slider'
 gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
 gtk.glade.textdomain(GETTEXT_DOMAIN)
 
 gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
 gtk.glade.textdomain(GETTEXT_DOMAIN)
 
-import zfs
-from timeslidersmf import TimeSliderSMF
-from rbac import RBACprofile
+from . import zfs
+from .timeslidersmf import TimeSliderSMF
+from .rbac import RBACprofile
 
 
 class FilesystemIntention:
 
 
 class FilesystemIntention:
@@ -310,7 +310,7 @@ class SetupManager:
         # Initialise SMF service instance state.
         try:
             self._sliderSMF = TimeSliderSMF()
         # Initialise SMF service instance state.
         try:
             self._sliderSMF = TimeSliderSMF()
-        except RuntimeError,message:
+        except RuntimeError as message:
             self._xml.get_widget("toplevel").set_sensitive(False)
             dialog = gtk.MessageDialog(self._xml.get_widget("toplevel"),
                                        0,
             self._xml.get_widget("toplevel").set_sensitive(False)
             dialog = gtk.MessageDialog(self._xml.get_widget("toplevel"),
                                        0,
@@ -1187,9 +1187,9 @@ class SetupManager:
                                           rsyncsmf.RSYNCCONFIGFILE)
                 newKey = generate_random_key()
                 try:
                                           rsyncsmf.RSYNCCONFIGFILE)
                 newKey = generate_random_key()
                 try:
-                    origmask = os.umask(0222)
+                    origmask = os.umask(0o222)
                     if not os.path.exists(nodePath):
                     if not os.path.exists(nodePath):
-                        os.makedirs(nodePath, 0755)
+                        os.makedirs(nodePath, 0o755)
                     f = open(configPath, 'w')
                     f.write("target_key=%s\n" % (newKey))
                     f.close()
                     f = open(configPath, 'w')
                     f.write("target_key=%s\n" % (newKey))
                     f.close()
@@ -1268,7 +1268,7 @@ class EnableService(threading.Thread):
             self._setupManager.setup_rsync_config()
             self._setupManager.setup_services()
             self._setupManager.broadcast_changes()
             self._setupManager.setup_rsync_config()
             self._setupManager.setup_services()
             self._setupManager.broadcast_changes()
-        except RuntimeError, message:
+        except RuntimeError as message:
             sys.stderr.write(str(message))
 
 def generate_random_key(length=32):
             sys.stderr.write(str(message))
 
 def generate_random_key(length=32):
index 5c64109..1185cf2 100644 (file)
@@ -22,7 +22,7 @@
 
 import subprocess
 import threading
 
 import subprocess
 import threading
-import util
+from . import util
 
 #SMF EXIT CODES
 SMF_EXIT_OK          = 0
 
 #SMF EXIT CODES
 SMF_EXIT_OK          = 0
@@ -149,5 +149,5 @@ class SMFInstance(Exception):
 
 if __name__ == "__main__":
   S = SMFInstance('svc:/application/time-slider')
 
 if __name__ == "__main__":
   S = SMFInstance('svc:/application/time-slider')
-  print S
+  print(S)
 
 
index 688a196..441123f 100644 (file)
@@ -62,8 +62,8 @@ GETTEXT_DOMAIN = 'time-slider'
 gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
 gtk.glade.textdomain(GETTEXT_DOMAIN)
 
 gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
 gtk.glade.textdomain(GETTEXT_DOMAIN)
 
-import zfs
-from rbac import RBACprofile
+from . import zfs
+from .rbac import RBACprofile
 
 class SnapshotNowDialog:
 
 
 class SnapshotNowDialog:
 
@@ -219,6 +219,6 @@ def main(argv):
                                        "administrative priviliges."
                                        "\n\nConsult your system administrator "))
         dialog.run()
                                        "administrative priviliges."
                                        "\n\nConsult your system administrator "))
         dialog.run()
-        print argv + "is not a valid executable path"
+        print(argv + "is not a valid executable path")
         sys.exit(1)
 
         sys.exit(1)
 
index a45592d..3c664fa 100755 (executable)
@@ -35,21 +35,21 @@ import argparse
 import logging
 from logging.handlers import SysLogHandler
 
 import logging
 from logging.handlers import SysLogHandler
 
-import glib
-import gobject
+from gi.repository import GLib as glib
+from gi.repository import GObject as gobject
 import dbus
 import dbus.service
 import dbus.mainloop
 import dbus.mainloop.glib
 
 import dbus
 import dbus.service
 import dbus.mainloop
 import dbus.mainloop.glib
 
-import dbussvc
-import zfs
-import smf
+from . import dbussvc
+from . import zfs
+from . import smf
 import time_slider.linux.timeslidersmf as timeslidersmf
 import time_slider.linux.autosnapsmf as autosnapsmf
 # import plugin
 from time_slider.linux.rbac import RBACprofile
 import time_slider.linux.timeslidersmf as timeslidersmf
 import time_slider.linux.autosnapsmf as autosnapsmf
 # import plugin
 from time_slider.linux.rbac import RBACprofile
-import util
+from . import util
 
 import time_slider.linux.timesliderconfig as timesliderconfig
 
 
 import time_slider.linux.timesliderconfig as timesliderconfig
 
@@ -94,7 +94,7 @@ class SnapshotManager(threading.Thread):
         self._smf = timeslidersmf.TimeSliderSMF()
         try:
             self.verbose = self._smf.get_verbose()
         self._smf = timeslidersmf.TimeSliderSMF()
         try:
             self.verbose = self._smf.get_verbose()
-        except RuntimeError,message:
+        except RuntimeError as message:
             self.logger.error("Error determing whether debugging is enabled")
             self.verbose = False
 
             self.logger.error("Error determing whether debugging is enabled")
             self.verbose = False
 
@@ -106,7 +106,7 @@ class SnapshotManager(threading.Thread):
         self.exitCode = smf.SMF_EXIT_OK
         self.refresh()
 
         self.exitCode = smf.SMF_EXIT_OK
         self.refresh()
 
-        # Seems we're up and running OK. 
+        # Seems we're up and running OK.
         # Signal our parent so we can daemonise
         os.kill(os.getppid(), signal.SIGUSR1)
 
         # Signal our parent so we can daemonise
         os.kill(os.getppid(), signal.SIGUSR1)
 
@@ -129,7 +129,7 @@ class SnapshotManager(threading.Thread):
                     volume.set_auto_snap(False)
             except IndexError:
                 pass
                     volume.set_auto_snap(False)
             except IndexError:
                 pass
-            
+
         nexttime = None
         waittime = None
         while True:
         nexttime = None
         waittime = None
         while True:
@@ -149,14 +149,14 @@ class SnapshotManager(threading.Thread):
                 nexttime = self._check_snapshots()
                 # Overdue snapshots are already taken automatically
                 # inside _check_snapshots() so nexttime should never be
                 nexttime = self._check_snapshots()
                 # Overdue snapshots are already taken automatically
                 # inside _check_snapshots() so nexttime should never be
-                # < 0. It can be None however, which is fine since it 
+                # < 0. It can be None however, which is fine since it
                 # will cause the scheduler thread to sleep indefinitely
                 # or until a SIGHUP is caught.
                 if nexttime:
                     util.debug("Waiting until " + str (nexttime), self.verbose)
                 waittime = None
                 if nexttime != None:
                 # will cause the scheduler thread to sleep indefinitely
                 # or until a SIGHUP is caught.
                 if nexttime:
                     util.debug("Waiting until " + str (nexttime), self.verbose)
                 waittime = None
                 if nexttime != None:
-                    waittime = nexttime - long(time.time())
+                    waittime = nexttime - int(time.time())
                     if (waittime <= 0):
                         # We took too long and missed a snapshot, so break out
                         # and catch up on it the next time through the loop
                     if (waittime <= 0):
                         # We took too long and missed a snapshot, so break out
                         # and catch up on it the next time through the loop
@@ -171,7 +171,7 @@ class SnapshotManager(threading.Thread):
                                self.verbose)
                     self._conditionLock.wait(_MINUTE * 15)
 
                                self.verbose)
                     self._conditionLock.wait(_MINUTE * 15)
 
-            except OSError, message:
+            except OSError as message:
                 self.logger.error("Caught OSError exception in snapshot" +
                                  " manager thread")
                 self.logger.error("Error details:\n" + \
                 self.logger.error("Caught OSError exception in snapshot" +
                                  " manager thread")
                 self.logger.error("Error details:\n" + \
@@ -181,7 +181,7 @@ class SnapshotManager(threading.Thread):
                 self.exitCode = smf.SMF_EXIT_ERR_FATAL
                 # Exit this thread
                 break
                 self.exitCode = smf.SMF_EXIT_ERR_FATAL
                 # Exit this thread
                 break
-            except RuntimeError,message:
+            except RuntimeError as message:
                 self.logger.error("Caught RuntimeError exception in snapshot" +
                                  " manager thread")
                 self.logger.error("Error details:\n" + \
                 self.logger.error("Caught RuntimeError exception in snapshot" +
                                  " manager thread")
                 self.logger.error("Error details:\n" + \
@@ -218,7 +218,7 @@ class SnapshotManager(threading.Thread):
     def _configure_svc_props(self):
         try:
             self.verbose = self._smf.get_verbose()
     def _configure_svc_props(self):
         try:
             self.verbose = self._smf.get_verbose()
-        except RuntimeError,message:
+        except RuntimeError as message:
             self.logger.error("Error determing whether debugging is enabled")
             self.verbose = False
 
             self.logger.error("Error determing whether debugging is enabled")
             self.verbose = False
 
@@ -230,7 +230,7 @@ class SnapshotManager(threading.Thread):
             util.debug("Critical level value is:  %d%%" % crit, self.verbose)
             emer = self._smf.get_cleanup_level("emergency")
             util.debug("Emergency level value is: %d%%" % emer, self.verbose)
             util.debug("Critical level value is:  %d%%" % crit, self.verbose)
             emer = self._smf.get_cleanup_level("emergency")
             util.debug("Emergency level value is: %d%%" % emer, self.verbose)
-        except RuntimeError,message:
+        except RuntimeError as message:
             self.logger.error("Failed to determine cleanup threshhold levels")
             self.logger.error("Details:\n" + \
                              "--------BEGIN ERROR MESSAGE--------\n" + \
             self.logger.error("Failed to determine cleanup threshhold levels")
             self.logger.error("Details:\n" + \
                              "--------BEGIN ERROR MESSAGE--------\n" + \
@@ -251,7 +251,7 @@ class SnapshotManager(threading.Thread):
 
         try:
             self._keepEmpties = self._smf.get_keep_empties()
 
         try:
             self._keepEmpties = self._smf.get_keep_empties()
-        except RuntimeError,message:
+        except RuntimeError as message:
             # Not fatal, just assume we delete them (default configuration)
             self.logger.error("Can't determine whether to keep empty snapshots")
             self.logger.error("Details:\n" + \
             # Not fatal, just assume we delete them (default configuration)
             self.logger.error("Can't determine whether to keep empty snapshots")
             self.logger.error("Details:\n" + \
@@ -261,7 +261,7 @@ class SnapshotManager(threading.Thread):
             self.logger.error("Assuming default value: False")
             self._keepEmpties = False
 
             self.logger.error("Assuming default value: False")
             self._keepEmpties = False
 
-        # Previously, snapshot labels used the ":" character was used as a 
+        # Previously, the ":" character was used as a
         # separator character for datestamps. Windows filesystems such as
         # CIFS and FAT choke on this character so now we use a user definable
         # separator value, with a default value of "_"
         # separator character for datestamps. Windows filesystems such as
         # CIFS and FAT choke on this character so now we use a user definable
         # separator value, with a default value of "_"
@@ -284,11 +284,11 @@ class SnapshotManager(threading.Thread):
                 else:
                     self._zpools.append(zpool)
                 util.debug(str(zpool), self.verbose)
                 else:
                     self._zpools.append(zpool)
                 util.debug(str(zpool), self.verbose)
-        except RuntimeError,message:
+        except RuntimeError as message:
             self.logger.error("Could not list Zpools")
             self.exitCode = smf.SMF_EXIT_ERR_FATAL
             # Propogate exception up to thread's run() method
             self.logger.error("Could not list Zpools")
             self.exitCode = smf.SMF_EXIT_ERR_FATAL
             # Propogate exception up to thread's run() method
-            raise RuntimeError,message
+            raise RuntimeError(message)
 
 
     def _rebuild_schedules(self):
 
 
     def _rebuild_schedules(self):
@@ -303,15 +303,15 @@ class SnapshotManager(threading.Thread):
         try:
             _defaultSchedules = autosnapsmf.get_default_schedules()
             _customSchedules = autosnapsmf.get_custom_schedules()
         try:
             _defaultSchedules = autosnapsmf.get_default_schedules()
             _customSchedules = autosnapsmf.get_custom_schedules()
-        except RuntimeError,message:
+        except RuntimeError as message:
             self.exitCode = smf.SMF_EXIT_ERR_FATAL
             self.exitCode = smf.SMF_EXIT_ERR_FATAL
-            raise RuntimeError"Error reading SMF schedule instances\n" + \
-                                "Details:\n" + str(message)
+            raise RuntimeError("Error reading SMF schedule instances\n" + \
+                                "Details:\n" + str(message))
         else:
             # Now set it in stone.
             self._defaultSchedules = tuple(_defaultSchedules)
             self._customSchedules = tuple(_customSchedules)
         else:
             # Now set it in stone.
             self._defaultSchedules = tuple(_defaultSchedules)
             self._customSchedules = tuple(_customSchedules)
-            
+
             # Build the combined schedule tuple from default + custom schedules
             _defaultSchedules.extend(_customSchedules)
             self._allSchedules = tuple(_defaultSchedules)
             # Build the combined schedule tuple from default + custom schedules
             _defaultSchedules.extend(_customSchedules)
             self._allSchedules = tuple(_defaultSchedules)
@@ -326,7 +326,7 @@ class SnapshotManager(threading.Thread):
         last = None
 
         for schedule,interval,period,keep in self._allSchedules:
         last = None
 
         for schedule,interval,period,keep in self._allSchedules:
-            # Shortcut if we've already processed this schedule and it's 
+            # Shortcut if we've already processed this schedule and it's
             # still up to date. Don't skip the default schedules though
             # because overlap affects their scheduling
             if [schedule,interval,period,keep] not in \
             # still up to date. Don't skip the default schedules though
             # because overlap affects their scheduling
             if [schedule,interval,period,keep] not in \
@@ -344,11 +344,11 @@ class SnapshotManager(threading.Thread):
                     snaps = self._datasets.list_snapshots("%s%s" % \
                                                          (self._prefix,
                                                           schedule))
                     snaps = self._datasets.list_snapshots("%s%s" % \
                                                          (self._prefix,
                                                           schedule))
-                except RuntimeError,message:
+                except RuntimeError as message:
                     self.exitCode = smf.SMF_EXIT_ERR_FATAL
                     self.logger.error("Failed to list snapshots during schedule update")
                     #Propogate up to the thread's run() method
                     self.exitCode = smf.SMF_EXIT_ERR_FATAL
                     self.logger.error("Failed to list snapshots during schedule update")
                     #Propogate up to the thread's run() method
-                    raise RuntimeError,message
+                    raise RuntimeError(message)
 
                 if len(snaps) > 0:
                     util.debug("Last %s snapshot was: %s" % \
 
                 if len(snaps) > 0:
                     util.debug("Last %s snapshot was: %s" % \
@@ -388,7 +388,7 @@ class SnapshotManager(threading.Thread):
                 snap_tm = time.gmtime(self._last[schedule])
                 # Increment year if period >= than 1 calender year.
                 year = snap_tm.tm_year
                 snap_tm = time.gmtime(self._last[schedule])
                 # Increment year if period >= than 1 calender year.
                 year = snap_tm.tm_year
-                year += period / 12
+                year += period // 12
                 period = period % 12
 
                 mon = (snap_tm.tm_mon + period) % 12
                 period = period % 12
 
                 mon = (snap_tm.tm_mon + period) % 12
@@ -404,7 +404,7 @@ class SnapshotManager(threading.Thread):
                 mday = snap_tm.tm_mday
                 if dlastmon > dnewmon and snap_tm.tm_mday > dnewmon:
                    mday = dnewmon
                 mday = snap_tm.tm_mday
                 if dlastmon > dnewmon and snap_tm.tm_mday > dnewmon:
                    mday = dnewmon
-                
+
                 tm =(year, mon, mday, \
                     snap_tm.tm_hour, snap_tm.tm_min, snap_tm.tm_sec, \
                     0, 0, -1)
                 tm =(year, mon, mday, \
                     snap_tm.tm_hour, snap_tm.tm_min, snap_tm.tm_sec, \
                     0, 0, -1)
@@ -417,12 +417,12 @@ class SnapshotManager(threading.Thread):
     def _next_due(self):
         schedule = None
         earliest = None
     def _next_due(self):
         schedule = None
         earliest = None
-        now = long(time.time())
-        
+        now = int(time.time())
+
         for s,i,p,k in self._defaultSchedules:
             due = self._next[s]
             if due <= now:
         for s,i,p,k in self._defaultSchedules:
             due = self._next[s]
             if due <= now:
-                #Default Schedule - so break out at the first 
+                #Default Schedule - so break out at the first
                 #schedule that is overdue. The subordinate schedules
                 #will re-adjust afterwards.
                 earliest,schedule = due,s
                 #schedule that is overdue. The subordinate schedules
                 #will re-adjust afterwards.
                 earliest,schedule = due,s
@@ -451,7 +451,7 @@ class SnapshotManager(threading.Thread):
         self._refreshLock.acquire()
         next,schedule = self._next_due()
         self._refreshLock.release()
         self._refreshLock.acquire()
         next,schedule = self._next_due()
         self._refreshLock.release()
-        now = long(time.time())
+        now = int(time.time())
         while next != None and next <= now:
             label = self._take_snapshots(schedule)
             # self._plugin.execute_plugins(schedule, label)
         while next != None and next <= now:
             label = self._take_snapshots(schedule)
             # self._plugin.execute_plugins(schedule, label)
@@ -464,23 +464,23 @@ class SnapshotManager(threading.Thread):
                        (schedule, dt.isoformat()), \
                        self.verbose)
         return next
                        (schedule, dt.isoformat()), \
                        self.verbose)
         return next
-                    
+
     def _take_snapshots(self, schedule):
         # Set the time before taking snapshot to avoid clock skew due
         # to time taken to complete snapshot.
     def _take_snapshots(self, schedule):
         # Set the time before taking snapshot to avoid clock skew due
         # to time taken to complete snapshot.
-        tm = long(time.time())
+        tm = int(time.time())
         label = "%s%s%s-%s" % \
                 (autosnapsmf.SNAPLABELPREFIX, self._separator, schedule,
                  datetime.datetime.now().strftime("%Y-%m-%d-%Hh%M"))
         try:
             self._datasets.create_auto_snapshot_set(label, tag=schedule)
         label = "%s%s%s-%s" % \
                 (autosnapsmf.SNAPLABELPREFIX, self._separator, schedule,
                  datetime.datetime.now().strftime("%Y-%m-%d-%Hh%M"))
         try:
             self._datasets.create_auto_snapshot_set(label, tag=schedule)
-        except RuntimeError, message:
+        except RuntimeError as message:
             # Write an error message, set the exit code and pass it up the
             # stack so the thread can terminate
             self.logger.error("Failed to create snapshots for schedule: %s" \
                              % (schedule))
             self.exitCode = smf.SMF_EXIT_MON_DEGRADE
             # Write an error message, set the exit code and pass it up the
             # stack so the thread can terminate
             self.logger.error("Failed to create snapshots for schedule: %s" \
                              % (schedule))
             self.exitCode = smf.SMF_EXIT_MON_DEGRADE
-            raise RuntimeError,message
+            raise RuntimeError(message)
         self._last[schedule] = tm;
         self._perform_purge(schedule)
         return label
         self._last[schedule] = tm;
         self._perform_purge(schedule)
         return label
@@ -489,7 +489,7 @@ class SnapshotManager(threading.Thread):
         """Cleans out zero sized snapshots, kind of cautiously"""
             # Per schedule: We want to delete 0 sized
             # snapshots but we need to keep at least one around (the most
         """Cleans out zero sized snapshots, kind of cautiously"""
             # Per schedule: We want to delete 0 sized
             # snapshots but we need to keep at least one around (the most
-            # recent one) for each schedule so that that overlap is 
+            # recent one) for each schedule so that the overlap is
             # maintained from frequent -> hourly -> daily etc.
             # Start off with the smallest interval schedule first and
             # move up. This increases the amount of data retained where
             # maintained from frequent -> hourly -> daily etc.
             # Start off with the smallest interval schedule first and
             # move up. This increases the amount of data retained where
@@ -510,10 +510,10 @@ class SnapshotManager(threading.Thread):
             # Clone the list because we want to remove items from it
             # while iterating through it.
             remainingsnaps = snaps[:]
             # Clone the list because we want to remove items from it
             # while iterating through it.
             remainingsnaps = snaps[:]
-        except RuntimeError,message:
+        except RuntimeError as message:
             self.logger.error("Failed to list snapshots during snapshot cleanup")
             self.exitCode = smf.SMF_EXIT_ERR_FATAL
             self.logger.error("Failed to list snapshots during snapshot cleanup")
             self.exitCode = smf.SMF_EXIT_ERR_FATAL
-            raise RuntimeError,message
+            raise RuntimeError(message)
 
         if (self._keepEmpties == False):
             try: # remove the newest one from the list.
 
         if (self._keepEmpties == False):
             try: # remove the newest one from the list.
@@ -523,7 +523,7 @@ class SnapshotManager(threading.Thread):
             for snapname in snaps:
                 try:
                     snapshot = zfs.Snapshot(snapname)
             for snapname in snaps:
                 try:
                     snapshot = zfs.Snapshot(snapname)
-                except Exception,message:
+                except Exception as message:
                     self.logger.error(str(message))
                     # Not fatal, just skip to the next snapshot
                     continue
                     self.logger.error(str(message))
                     # Not fatal, just skip to the next snapshot
                     continue
@@ -534,45 +534,45 @@ class SnapshotManager(threading.Thread):
                                    self.verbose)
                         try:
                             snapshot.destroy()
                                    self.verbose)
                         try:
                             snapshot.destroy()
-                        except RuntimeError,message:
+                        except RuntimeError as message:
                             self.logger.error("Failed to destroy snapshot: " +
                                              snapname)
                             self.exitCode = smf.SMF_EXIT_MON_DEGRADE
                             # Propagate exception so thread can exit
                             self.logger.error("Failed to destroy snapshot: " +
                                              snapname)
                             self.exitCode = smf.SMF_EXIT_MON_DEGRADE
                             # Propagate exception so thread can exit
-                            raise RuntimeError,message
+                            raise RuntimeError(message)
                         remainingsnaps.remove(snapname)
                         remainingsnaps.remove(snapname)
-                except RuntimeError,message:
+                except RuntimeError as message:
                     self.logger.error("Can not determine used size of: " + \
                                      snapname)
                     self.exitCode = smf.SMF_EXIT_MON_DEGRADE
                     #Propagate the exception to the thread's run() method
                     self.logger.error("Can not determine used size of: " + \
                                      snapname)
                     self.exitCode = smf.SMF_EXIT_MON_DEGRADE
                     #Propagate the exception to the thread's run() method
-                    raise RuntimeError,message
+                    raise RuntimeError(message)
 
         # Deleting individual snapshots instead of recursive sets
         # breaks the recursion chain and leaves child snapshots
 
         # Deleting individual snapshots instead of recursive sets
         # breaks the recursion chain and leaves child snapshots
-        # dangling so we need to take care of cleaning up the 
+        # dangling so we need to take care of cleaning up the
         # snapshots.
         target = len(remainingsnaps) - self._keep[schedule]
         counter = 0
         while counter < target:
             util.debug("Destroy expired snapshot: " + \
         # snapshots.
         target = len(remainingsnaps) - self._keep[schedule]
         counter = 0
         while counter < target:
             util.debug("Destroy expired snapshot: " + \
-                       remainingsnaps[counter], 
+                       remainingsnaps[counter],
                        self.verbose)
             try:
                 snapshot = zfs.Snapshot(remainingsnaps[counter])
                        self.verbose)
             try:
                 snapshot = zfs.Snapshot(remainingsnaps[counter])
-            except Exception,message:
+            except Exception as message:
                     self.logger.error(str(message))
                     # Not fatal, just skip to the next snapshot
                     counter += 1
                     continue
             try:
                 snapshot.destroy()
                     self.logger.error(str(message))
                     # Not fatal, just skip to the next snapshot
                     counter += 1
                     continue
             try:
                 snapshot.destroy()
-            except RuntimeError,message:
+            except RuntimeError as message:
                 self.logger.error("Failed to destroy snapshot: " +
                                  snapshot.name)
                 self.exitCode = smf.SMF_EXIT_ERR_FATAL
                 # Propogate exception so thread can exit
                 self.logger.error("Failed to destroy snapshot: " +
                                  snapshot.name)
                 self.exitCode = smf.SMF_EXIT_ERR_FATAL
                 # Propogate exception so thread can exit
-                raise RuntimeError,message
+                raise RuntimeError(message)
             else:
                 counter += 1
 
             else:
                 counter += 1
 
@@ -586,19 +586,19 @@ class SnapshotManager(threading.Thread):
             for name in self._datasets.list_auto_snapshot_sets(schedule):
                 dataset = zfs.ReadWritableDataset(name)
                 self._prune_snapshots(dataset, schedule)
             for name in self._datasets.list_auto_snapshot_sets(schedule):
                 dataset = zfs.ReadWritableDataset(name)
                 self._prune_snapshots(dataset, schedule)
-        except RuntimeError,message:
+        except RuntimeError as message:
             self.logger.error("Error listing datasets during " + \
                              "removal of expired snapshots")
             self.exitCode = smf.SMF_EXIT_ERR_FATAL
             # Propagate up to thread's run() method
             self.logger.error("Error listing datasets during " + \
                              "removal of expired snapshots")
             self.exitCode = smf.SMF_EXIT_ERR_FATAL
             # Propagate up to thread's run() method
-            raise RuntimeError,message
+            raise RuntimeError(message)
 
     def _needs_cleanup(self):
         if self._remedialCleanup == False:
             # Sys admin has explicitly instructed for remedial cleanups
             # not to be performed.
             return False
 
     def _needs_cleanup(self):
         if self._remedialCleanup == False:
             # Sys admin has explicitly instructed for remedial cleanups
             # not to be performed.
             return False
-        now = long(time.time())
+        now = int(time.time())
         # Don't run checks any less than 15 minutes apart.
         if self._cleanupLock.acquire(False) == False:
             #Indicates that a cleanup is already running.
         # Don't run checks any less than 15 minutes apart.
         if self._cleanupLock.acquire(False) == False:
             #Indicates that a cleanup is already running.
@@ -625,14 +625,14 @@ class SnapshotManager(threading.Thread):
                                            self.verbose)
                                 self._cleanupLock.release()
                                 return True
                                            self.verbose)
                                 self._cleanupLock.release()
                                 return True
-                except RuntimeError, message:
+                except RuntimeError as message:
                     self.logger.error("Error checking zpool capacity of: " + \
                                      zpool.name)
                     self._cleanupLock.release()
                     self.exitCode = smf.SMF_EXIT_ERR_FATAL
                     # Propagate up to thread's run() method.
                     self.logger.error("Error checking zpool capacity of: " + \
                                      zpool.name)
                     self._cleanupLock.release()
                     self.exitCode = smf.SMF_EXIT_ERR_FATAL
                     # Propagate up to thread's run() method.
-                    raise RuntimeError,message
-            self._lastCleanupCheck = long(time.time())
+                    raise RuntimeError(message)
+            self._lastCleanupCheck = int(time.time())
         self._cleanupLock.release()
         return False
 
         self._cleanupLock.release()
         return False
 
@@ -662,16 +662,16 @@ class SnapshotManager(threading.Thread):
                     self._poolstatus[zpool.name] = 4
             # This also catches exceptions thrown from _run_<level>_cleanup()
             # and _run_cleanup() in methods called by _perform_cleanup()
                     self._poolstatus[zpool.name] = 4
             # This also catches exceptions thrown from _run_<level>_cleanup()
             # and _run_cleanup() in methods called by _perform_cleanup()
-            except RuntimeError,message:
+            except RuntimeError as message:
                 self.logger.error("Remedial space cleanup failed because " + \
                                  "of failure to determinecapacity of: " + \
                                  zpool.name)
                 self.exitCode = smf.SMF_EXIT_ERR_FATAL
                 self._cleanupLock.release()
                 # Propogate up to thread's run() method.
                 self.logger.error("Remedial space cleanup failed because " + \
                                  "of failure to determinecapacity of: " + \
                                  zpool.name)
                 self.exitCode = smf.SMF_EXIT_ERR_FATAL
                 self._cleanupLock.release()
                 # Propogate up to thread's run() method.
-                raise RuntimeError,message
+                raise RuntimeError(message)
 
 
-            # Bad - there's no more snapshots left and nothing 
+            # Bad - there's no more snapshots left and nothing
             # left to delete. We don't disable the service since
             # it will permit self recovery and snapshot
             # retention when space becomes available on
             # left to delete. We don't disable the service since
             # it will permit self recovery and snapshot
             # retention when space becomes available on
@@ -732,13 +732,13 @@ class SnapshotManager(threading.Thread):
         snapshots = []
         try:
             clonedsnaps = self._datasets.list_cloned_snapshots()
         snapshots = []
         try:
             clonedsnaps = self._datasets.list_cloned_snapshots()
-        except RuntimeError,message:
+        except RuntimeError as message:
                 self.logger.error("Error (non-fatal) listing cloned snapshots" +
                                  " while recovering pool capacity")
                 self.logger.error("Error details:\n" + \
                                  "--------BEGIN ERROR MESSAGE--------\n" + \
                                  str(message) + \
                 self.logger.error("Error (non-fatal) listing cloned snapshots" +
                                  " while recovering pool capacity")
                 self.logger.error("Error details:\n" + \
                                  "--------BEGIN ERROR MESSAGE--------\n" + \
                                  str(message) + \
-                                 "\n--------END ERROR MESSAGE--------")    
+                                 "\n--------END ERROR MESSAGE--------")
 
         # Build a list of snapshots in the given schedule, that are not
         # cloned, and sort the result in reverse chronological order.
 
         # Build a list of snapshots in the given schedule, that are not
         # cloned, and sort the result in reverse chronological order.
@@ -748,13 +748,13 @@ class SnapshotManager(threading.Thread):
                             % (self._prefix,schedule)) \
                             if not s in clonedsnaps]
             snapshots.reverse()
                             % (self._prefix,schedule)) \
                             if not s in clonedsnaps]
             snapshots.reverse()
-        except RuntimeError,message:
+        except RuntimeError as message:
             self.logger.error("Error listing snapshots" +
                              " while recovering pool capacity")
             self.exitCode = smf.SMF_EXIT_ERR_FATAL
             # Propagate the error up to the thread's run() method.
             self.logger.error("Error listing snapshots" +
                              " while recovering pool capacity")
             self.exitCode = smf.SMF_EXIT_ERR_FATAL
             # Propagate the error up to the thread's run() method.
-            raise RuntimeError,message
-   
+            raise RuntimeError(message)
+
         while zpool.get_capacity() > threshold:
             if len(snapshots) == 0:
                 self.logger.info( \
         while zpool.get_capacity() > threshold:
             if len(snapshots) == 0:
                 self.logger.info( \
@@ -762,7 +762,7 @@ class SnapshotManager(threading.Thread):
                                % schedule)
                 return
 
                                % schedule)
                 return
 
-            """This is not an exact science. Deleteing a zero sized 
+            """This is not an exact science. Deleting a zero sized
             snapshot can have unpredictable results. For example a
             pair of snapshots may share exclusive reference to a large
             amount of data (eg. a large core file). The usage of both
             snapshot can have unpredictable results. For example a
             pair of snapshots may share exclusive reference to a large
             amount of data (eg. a large core file). The usage of both
@@ -790,7 +790,7 @@ class SnapshotManager(threading.Thread):
             util.debug("Destroying %s" % snapname, self.verbose)
             try:
                 snapshot.destroy()
             util.debug("Destroying %s" % snapname, self.verbose)
             try:
                 snapshot.destroy()
-            except RuntimeError,message:
+            except RuntimeError as message:
                 # Would be nice to be able to mark service as degraded here
                 # but it's better to try to continue on rather than to give
                 # up altogether (SMF maintenance state)
                 # Would be nice to be able to mark service as degraded here
                 # but it's better to try to continue on rather than to give
                 # up altogether (SMF maintenance state)
@@ -801,7 +801,7 @@ class SnapshotManager(threading.Thread):
                 self._destroyedsnaps.append(snapname)
             # Give zfs some time to recalculate.
             time.sleep(3)
                 self._destroyedsnaps.append(snapname)
             # Give zfs some time to recalculate.
             time.sleep(3)
-        
+
     def _send_to_syslog(self):
         for zpool in self._zpools:
             status = self._poolstatus[zpool.name]
     def _send_to_syslog(self):
         for zpool in self._zpools:
             status = self._poolstatus[zpool.name]
@@ -818,7 +818,7 @@ class SnapshotManager(threading.Thread):
                 self.logger.critical( \
                               "%s exceeded %d%% capacity. " \
                               "Weekly, hourly and daily automatic snapshots were destroyed" \
                 self.logger.critical( \
                               "%s exceeded %d%% capacity. " \
                               "Weekly, hourly and daily automatic snapshots were destroyed" \
-                               % (zpool.name, self._criticalLevel))                             
+                               % (zpool.name, self._criticalLevel))
             elif status == 1:
                 self.logger.warning( \
                               "%s exceeded %d%% capacity. " \
             elif status == 1:
                 self.logger.warning( \
                               "%s exceeded %d%% capacity. " \
@@ -916,8 +916,8 @@ def create_daemon():
     signal.signal(signal.SIGALRM, child_sig_handler)
     try:
         pid = os.fork()
     signal.signal(signal.SIGALRM, child_sig_handler)
     try:
         pid = os.fork()
-    except OSError, e:
-        raise Exception, "%s [%d]" % (e.strerror, e.errno)
+    except OSError as e:
+        raise Exception("%s [%d]" % (e.strerror, e.errno))
 
     if (pid == 0):
         #Reset signals that we set to trap in parent
 
     if (pid == 0):
         #Reset signals that we set to trap in parent
index 22c2925..db33937 100755 (executable)
@@ -22,8 +22,8 @@
 
 import subprocess
 import threading
 
 import subprocess
 import threading
-import smf
-import util
+from . import smf
+from . import util
 
 #SMF EXIT CODES
 SMF_EXIT_OK          = 0
 
 #SMF EXIT CODES
 SMF_EXIT_OK          = 0
@@ -150,5 +150,5 @@ class TimeSliderSMF(smf.SMFInstance):
 
 if __name__ == "__main__":
   S = TimeSliderSMF('svc:/application/time-slider')
 
 if __name__ == "__main__":
   S = TimeSliderSMF('svc:/application/time-slider')
-  print S
+  print(S)
 
 
index 9f4c9f6..10dcb98 100755 (executable)
@@ -7,17 +7,17 @@ for root, dirs, files in os.walk("/ts-test/TIMESLIDER/nanmbp"):
     if '.time-slider' in dirs:
 #        dirs.remove('.time-slider')
         backupDirs.append(os.path.join(root, ".time-slider/rsync"))
     if '.time-slider' in dirs:
 #        dirs.remove('.time-slider')
         backupDirs.append(os.path.join(root, ".time-slider/rsync"))
-       print "root %s" % root
+       print("root %s" % root)
        s1 = root.split ("/ts-test/TIMESLIDER/nanmbp/", 1)
        s1 = root.split ("/ts-test/TIMESLIDER/nanmbp/", 1)
-       print s1
+       print(s1)
 
 for dirName in backupDirs:
 
 for dirName in backupDirs:
-    print "dirName %s " % dirName
+    print("dirName %s " % dirName)
     s1 = dirName.split ("/ts-test/TIMESLIDER/nanmbp/",1)
     s2 = s1[1].split ("/.time-slider/rsync",1)
     s1 = dirName.split ("/ts-test/TIMESLIDER/nanmbp/",1)
     s2 = s1[1].split ("/.time-slider/rsync",1)
-    print s2[0]
+    print(s2[0])
     os.chdir(dirName)
     dirList = ["toto %s" % d for d in os.listdir(dirName) \
                 if os.path.isdir(d) and
                 not os.path.islink(d)] 
     os.chdir(dirName)
     dirList = ["toto %s" % d for d in os.listdir(dirName) \
                 if os.path.isdir(d) and
                 not os.path.islink(d)] 
-    print dirList
+    print(dirList)
index b4ad9fd..ec4d5d6 100755 (executable)
@@ -73,8 +73,8 @@ GETTEXT_DOMAIN = 'time-slider'
 gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
 gtk.glade.textdomain(GETTEXT_DOMAIN)
 
 gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
 gtk.glade.textdomain(GETTEXT_DOMAIN)
 
-import zfs
-from rbac import RBACprofile
+from . import zfs
+from .rbac import RBACprofile
 
 class RsyncBackup:
 
 
 class RsyncBackup:
 
@@ -91,7 +91,7 @@ class RsyncBackup:
           self.creationtime = creationtime
           try:
               tm = time.localtime(self.creationtime)
           self.creationtime = creationtime
           try:
               tm = time.localtime(self.creationtime)
-              self.creationtime_str = unicode(time.strftime ("%c", tm),
+              self.creationtime_str = str(time.strftime ("%c", tm),
                          locale.getpreferredencoding()).encode('utf-8')
           except:
               self.creationtime_str = time.ctime(self.creationtime)
                          locale.getpreferredencoding()).encode('utf-8')
           except:
               self.creationtime_str = time.ctime(self.creationtime)
@@ -131,22 +131,21 @@ class RsyncBackup:
                             rsyncsmf.RSYNCLOCKSUFFIX)
 
        if not os.path.exists(lockFileDir):
                             rsyncsmf.RSYNCLOCKSUFFIX)
 
        if not os.path.exists(lockFileDir):
-         os.makedirs(lockFileDir, 0755)
+         os.makedirs(lockFileDir, 0o755)
        
        lockFile = os.path.join(lockFileDir, self.snaplabel + ".lock")
        try:
          lockFp = open(lockFile, 'w')
          fcntl.flock(lockFp, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
        
        lockFile = os.path.join(lockFileDir, self.snaplabel + ".lock")
        try:
          lockFp = open(lockFile, 'w')
          fcntl.flock(lockFp, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
-         raise RuntimeError, \
-         "couldn't delete %s, already used by another process" % self.mountpoint
+         raise RuntimeError("couldn't delete %s, already used by another process" % self.mountpoint)
          return 
 
        trashDir = os.path.join(self.rsync_dir,
                          self.fsname,
                          rsyncsmf.RSYNCTRASHSUFFIX)
        if not os.path.exists(trashDir):
          return 
 
        trashDir = os.path.join(self.rsync_dir,
                          self.fsname,
                          rsyncsmf.RSYNCTRASHSUFFIX)
        if not os.path.exists(trashDir):
-         os.makedirs(trashDir, 0755)
+         os.makedirs(trashDir, 0o755)
 
        backupTrashDir = os.path.join (self.rsync_dir,
                                 self.fsname,
 
        backupTrashDir = os.path.join (self.rsync_dir,
                                 self.fsname,
@@ -178,6 +177,6 @@ for root, dirs, files in os.walk(rsyncsmf.RsyncSMF("%s:rsync" %(plugin.PLUGINBAS
                     insort(backupDirs, os.path.abspath(backupDir))
 
 
                     insort(backupDirs, os.path.abspath(backupDir))
 
 
-print backupDirs
+print(backupDirs)
 
 
 
 
index 496b686..8e9fb3a 100644 (file)
@@ -24,7 +24,6 @@ import os
 import subprocess
 import sys
 import syslog
 import subprocess
 import sys
 import syslog
-import statvfs
 import math
 import gio
 import logging
 import math
 import gio
 import logging
@@ -35,6 +34,8 @@ def run_command(command, raise_on_try=True):
     Returns a tuple of standard out and standard error.
     Throws a RuntimeError if the command failed to execute or
     if the command returns a non-zero exit status.
     Returns a tuple of standard out and standard error.
     Throws a RuntimeError if the command failed to execute or
     if the command returns a non-zero exit status.
+
+    Assume the output is UTF-8 encoded
     """
 
     debug("Trying to run command %s" % (command), True)
     """
 
     debug("Trying to run command %s" % (command), True)
@@ -43,14 +44,14 @@ def run_command(command, raise_on_try=True):
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              close_fds=True)
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              close_fds=True)
-        outdata,errdata = p.communicate()
+        outdata,errdata = (x.decode('utf-8') for x in p.communicate())
         err = p.wait()
         err = p.wait()
-    except OSError, message:
-        raise RuntimeError"%s subprocess error:\n %s" % \
-                            (command, str(message))
+    except OSError as message:
+        raise RuntimeError("%s subprocess error:\n %s" % \
+                            (command, str(message)))
     if err != 0 and raise_on_try:
     if err != 0 and raise_on_try:
-        raise RuntimeError'%s failed with exit code %d\n%s' % \
-                            (str(command), err, errdata)
+        raise RuntimeError('%s failed with exit code %d\n%s' % \
+                            (str(command), err, errdata))
     return outdata,errdata
 
 def debug(message, verbose):
     return outdata,errdata
 
 def debug(message, verbose):
@@ -79,8 +80,8 @@ def get_filesystem_capacity(path):
         raise ValueError("%s is a non-existent path" % path)
     f = os.statvfs(path)
 
         raise ValueError("%s is a non-existent path" % path)
     f = os.statvfs(path)
 
-    unavailBlocks = f[statvfs.F_BLOCKS] - f[statvfs.F_BAVAIL]
-    capacity = int(math.ceil(100 * (unavailBlocks / float(f[statvfs.F_BLOCKS]))))
+    unavailBlocks = f.f_blocks - f.f_bavail
+    capacity = int(math.ceil(100 * (unavailBlocks / float(f.f_blocks))))
 
     return capacity
 
 
     return capacity
 
@@ -89,8 +90,8 @@ def get_available_size(path):
     if not os.path.exists(path):
         raise ValueError("%s is a non-existent path" % path)
     f = os.statvfs(path)
     if not os.path.exists(path):
         raise ValueError("%s is a non-existent path" % path)
     f = os.statvfs(path)
-    free = long(f[statvfs.F_BAVAIL] * f[statvfs.F_FRSIZE])
-    
+    free = int(f.f_bavail * f.f_frsize)
+
     return free
 
 def get_used_size(path):
     return free
 
 def get_used_size(path):
@@ -100,8 +101,8 @@ def get_used_size(path):
         raise ValueError("%s is a non-existent path" % path)
     f = os.statvfs(path)
 
         raise ValueError("%s is a non-existent path" % path)
     f = os.statvfs(path)
 
-    unavailBlocks = f[statvfs.F_BLOCKS] - f[statvfs.F_BAVAIL]
-    used = long(unavailBlocks * f[statvfs.F_FRSIZE])
+    unavailBlocks = f.f_blocks - f.f_bavail
+    used = int(unavailBlocks * f.f_frsize)
 
     return used
 
 
     return used
 
@@ -111,7 +112,7 @@ def get_total_size(path):
     if not os.path.exists(path):
         raise ValueError("%s is a non-existent path" % path)
     f = os.statvfs(path)
     if not os.path.exists(path):
         raise ValueError("%s is a non-existent path" % path)
     f = os.statvfs(path)
-    total = long(f[statvfs.F_BLOCKS] * f[statvfs.F_FRSIZE])
+    total = int(f.f_blocks * f.f_frsize)
 
     return total
 
 
     return total
 
index 3efa9ca..e9092b8 100644 (file)
@@ -25,7 +25,7 @@ import re
 import threading
 from bisect import insort, bisect_left, bisect_right
 
 import threading
 from bisect import insort, bisect_left, bisect_right
 
-import util
+from . import util
 
 BYTESPERMB = 1048576
 
 
 BYTESPERMB = 1048576
 
@@ -38,14 +38,14 @@ ZPOOLCMD = "/usr/sbin/zpool"
 class Datasets(Exception):
     """
     Container class for all zfs datasets. Maintains a centralised
 class Datasets(Exception):
     """
     Container class for all zfs datasets. Maintains a centralised
-    list of datasets (generated on demand) and accessor methods. 
+    list of datasets (generated on demand) and accessor methods.
     Also allows clients to notify when a refresh might be necessary.
     """
     # Class wide instead of per-instance in order to avoid duplication
     filesystems = None
     volumes = None
     snapshots = None
     Also allows clients to notify when a refresh might be necessary.
     """
     # Class wide instead of per-instance in order to avoid duplication
     filesystems = None
     volumes = None
     snapshots = None
-    
+
     # Mutex locks to prevent concurrent writes to above class wide
     # dataset lists.
     _filesystemslock = threading.Lock()
     # Mutex locks to prevent concurrent writes to above class wide
     # dataset lists.
     _filesystemslock = threading.Lock()
@@ -56,7 +56,7 @@ class Datasets(Exception):
         """
         Create a complete set of snapshots as if this were
         for a standard zfs-auto-snapshot operation.
         """
         Create a complete set of snapshots as if this were
         for a standard zfs-auto-snapshot operation.
-        
+
         Keyword arguments:
         label:
             A label to apply to the snapshot name. Cannot be None.
         Keyword arguments:
         label:
             A label to apply to the snapshot name. Cannot be None.
@@ -64,7 +64,7 @@ class Datasets(Exception):
             A string indicating one of the standard auto-snapshot schedules
             tags to check (eg. "frequent" for will map to the tag:
             com.sun:auto-snapshot:frequent). If specified as a zfs property
             A string indicating one of the standard auto-snapshot schedules
             tags to check (eg. "frequent" for will map to the tag:
             com.sun:auto-snapshot:frequent). If specified as a zfs property
-            on a zfs dataset, the property corresponding to the tag will 
+            on a zfs dataset, the property corresponding to the tag will
             override the wildcard property: "com.sun:auto-snapshot"
             Default value = None
         """
             override the wildcard property: "com.sun:auto-snapshot"
             Default value = None
         """
@@ -86,7 +86,7 @@ class Datasets(Exception):
             outdata,errdata = util.run_command(scmd)
             for line in outdata.rstrip().split('\n'):
                 line = line.split()
             outdata,errdata = util.run_command(scmd)
             for line in outdata.rstrip().split('\n'):
                 line = line.split()
-                # Skip over unset values. 
+                # Skip over unset values.
                 if line[1] == "-":
                     continue
                 # Add to everything list. This is used later
                 if line[1] == "-":
                     continue
                 # Add to everything list. This is used later
@@ -104,7 +104,7 @@ class Datasets(Exception):
             line = line.split()
             idx = bisect_right(everything, line[0])
             if len(everything) == 0 or \
             line = line.split()
             idx = bisect_right(everything, line[0])
             if len(everything) == 0 or \
-               everything[idx-1] != line[0]:           
+               everything[idx-1] != line[0]:
                 # Dataset is neither included nor excluded so far
                 if line[1] == "-":
                     continue
                 # Dataset is neither included nor excluded so far
                 if line[1] == "-":
                     continue
@@ -117,7 +117,7 @@ class Datasets(Exception):
         # Now figure out what can be recursively snapshotted and what
         # must be singly snapshotted. Single snapshot restrictions apply
         # to those datasets who have a child in the excluded list.
         # Now figure out what can be recursively snapshotted and what
         # must be singly snapshotted. Single snapshot restrictions apply
         # to those datasets who have a child in the excluded list.
-        # 'included' is sorted in reverse alphabetical order. 
+        # 'included' is sorted in reverse alphabetical order.
         for datasetname in included:
             excludedchild = False
             idx = bisect_right(everything, datasetname)
         for datasetname in included:
             excludedchild = False
             idx = bisect_right(everything, datasetname)
@@ -173,7 +173,7 @@ class Datasets(Exception):
             A string indicating one of the standard auto-snapshot schedules
             tags to check (eg. "frequent" will map to the tag:
             com.sun:auto-snapshot:frequent). If specified as a zfs property
             A string indicating one of the standard auto-snapshot schedules
             tags to check (eg. "frequent" will map to the tag:
             com.sun:auto-snapshot:frequent). If specified as a zfs property
-            on a zfs dataset, the property corresponding to the tag will 
+            on a zfs dataset, the property corresponding to the tag will
             override the wildcard property: "com.sun:auto-snapshot"
             Default value = None
         """
             override the wildcard property: "com.sun:auto-snapshot"
             Default value = None
         """
@@ -216,7 +216,7 @@ class Datasets(Exception):
     def list_filesystems(self, pattern = None):
         """
         List pattern matching filesystems sorted by name.
     def list_filesystems(self, pattern = None):
         """
         List pattern matching filesystems sorted by name.
-        
+
         Keyword arguments:
         pattern -- Filter according to pattern (default None)
         """
         Keyword arguments:
         pattern -- Filter according to pattern (default None)
         """
@@ -230,13 +230,13 @@ class Datasets(Exception):
                    "-o", "name,mountpoint", "-s", "name"]
             try:
                 outdata,errdata = util.run_command(cmd, True)
                    "-o", "name,mountpoint", "-s", "name"]
             try:
                 outdata,errdata = util.run_command(cmd, True)
-            except OSError, message:
-                raise RuntimeError"%s subprocess error:\n %s" % \
-                                    (cmd, str(message))
+            except OSError as message:
+                raise RuntimeError("%s subprocess error:\n %s" % \
+                                    (cmd, str(message)))
             if err != 0:
                 Datasets._filesystemslock.release()
             if err != 0:
                 Datasets._filesystemslock.release()
-                raise RuntimeError'%s failed with exit code %d\n%s' % \
-                                    (str(cmd), err, errdata)
+                raise RuntimeError('%s failed with exit code %d\n%s' % \
+                                    (str(cmd), err, errdata))
             for line in outdata.rstrip().split('\n'):
                 line = line.rstrip().split()
                 Datasets.filesystems.append([line[0], line[1]])
             for line in outdata.rstrip().split('\n'):
                 line = line.rstrip().split()
                 Datasets.filesystems.append([line[0], line[1]])
@@ -258,7 +258,7 @@ class Datasets(Exception):
     def list_volumes(self, pattern = None):
         """
         List pattern matching volumes sorted by name.
-        
+
         Keyword arguments:
         pattern -- Filter according to pattern (default None)
         """
@@ -270,9 +270,9 @@ class Datasets(Exception):
                    "-o", "name", "-s", "name"]
             try:
                 outdata,errdata = util.run_command(cmd, True)
-            except RuntimeError, message:
+            except RuntimeError as message:
                 Datasets._volumeslock.release()
-                raise RuntimeError, str(message)
+                raise RuntimeError(str(message))
 
             for line in outdata.rstrip().split('\n'):
                 Datasets.volumes.append(line.rstrip())
@@ -295,7 +295,7 @@ class Datasets(Exception):
         """
         List pattern matching snapshots sorted by creation date.
         Oldest listed first
-        
+
         Keyword arguments:
         pattern -- Filter according to pattern (default None)
         """
@@ -307,14 +307,14 @@ class Datasets(Exception):
             cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value,name", "creation"]
             try:
                 outdata,errdata = util.run_command(cmd, True)
-            except RuntimeError, message:
+            except RuntimeError as message:
                 Datasets.snapshotslock.release()
-                raise RuntimeError, str(message)
+                raise RuntimeError(str(message))
             for dataset in outdata.rstrip().split('\n'):
                 if re.search("@", dataset):
                     insort(snaps, dataset.split())
             for snap in snaps:
-                Datasets.snapshots.append([snap[1], long(snap[0])])
+                Datasets.snapshots.append([snap[1], int(snap[0])])
         if pattern == None:
             snapshots = Datasets.snapshots[:]
         else:
@@ -374,7 +374,7 @@ class Datasets(Exception):
         """
         # FIXME in future.
         # This is a little sub-optimal because we should be able to modify
-        # the snapshot list in place in some situations and regenerate the 
+        # the snapshot list in place in some situations and regenerate the
         # snapshot list without calling out to zfs(1m). But on the
         # pro side, we will pick up any new snapshots since the last
         # scan that we would be otherwise unaware of.
@@ -423,7 +423,7 @@ class ZPool:
         outdata,errdata = util.run_command(cmd)
         _used,_available = outdata.rstrip().split('\n')
         used = float(_used)
-        available = float(_available) 
+        available = float(_available)
         return 100.0 * used/(used + available)
 
     def get_available_size(self):
@@ -500,7 +500,7 @@ class ZPool:
             A string indicating one of the standard auto-snapshot schedules
             tags to check (eg. "frequent" will map to the tag:
             com.sun:auto-snapshot:frequent). If specified as a zfs property
-            on a zfs dataset, the property corresponding to the tag will 
+            on a zfs dataset, the property corresponding to the tag will
             override the wildcard property: "com.sun:auto-snapshot"
             Default value = None
         """
@@ -521,9 +521,9 @@ class ZPool:
         """
         List pattern matching snapshots sorted by creation date.
         Oldest listed first
-           
+
         Keyword arguments:
-        pattern -- Filter according to pattern (default None)   
+        pattern -- Filter according to pattern (default None)
         """
         # If there isn't a list of snapshots for this dataset
         # already, create it now and store it in order to save
@@ -591,7 +591,7 @@ class ReadableDataset:
             cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "creation",
                    self.name]
             outdata,errdata = util.run_command(cmd)
-            self.__creationTime = long(outdata.rstrip())
+            self.__creationTime = int(outdata.rstrip())
         return self.__creationTime
 
     def exists(self):
@@ -599,13 +599,13 @@ class ReadableDataset:
         Returns True if the dataset is still existent on the system.
         False otherwise
         """
-        # Test existance of the dataset by checking the output of a 
+        # Test existance of the dataset by checking the output of a
         # simple zfs get command on the snapshot
         cmd = [ZFSCMD, "get", "-H", "-o", "name", "type", self.name]
         try:
             outdata,errdata = util.run_command(cmd)
-        except RuntimeError, message:
-            raise RuntimeError, str(message)
+        except RuntimeError as message:
+            raise RuntimeError(str(message))
 
         result = outdata.rstrip()
         if result == self.name:
@@ -616,7 +616,7 @@ class ReadableDataset:
     def get_used_size(self):
         cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "used", self.name]
         outdata,errdata = util.run_command(cmd)
-        return long(outdata.rstrip())
+        return int(outdata.rstrip())
 
     def get_user_property(self, prop, local=False):
         if local == True:
@@ -629,7 +629,7 @@ class ReadableDataset:
     def set_user_property(self, prop, value):
         cmd = [ZFSCMD, "set", "%s=%s" % (prop, value), self.name]
         outdata,errdata = util.run_command(cmd)
-    
+
     def unset_user_property(self, prop):
         cmd = [ZFSCMD, "inherit", prop, self.name]
         outdata,errdata = util.run_command(cmd)
@@ -638,7 +638,7 @@ class Snapshot(ReadableDataset):
     """
     ZFS Snapshot object class.
     Provides information and operations specfic to ZFS snapshots
-    """    
+    """
     def __init__(self, name, creation = None):
         """
         Keyword arguments:
@@ -656,7 +656,7 @@ class Snapshot(ReadableDataset):
     def __split_snapshot_name(self):
         name = self.name.split("@", 1)
         # Make sure this is really a snapshot and not a
-        # filesystem otherwise a filesystem could get 
+        # filesystem otherwise a filesystem could get
         # destroyed instead of a snapshot. That would be
         # really really bad.
         if name[0] == self.name:
@@ -673,7 +673,7 @@ class Snapshot(ReadableDataset):
                "-o", "value", "referenced", \
                self.name]
         outdata,errdata = util.run_command(cmd)
-        return long(outdata.rstrip())
+        return int(outdata.rstrip())
 
     def list_children(self):
         """Returns a recursive list of child snapshots of this snapshot"""
@@ -807,7 +807,7 @@ class ReadWritableDataset(ReadableDataset):
         cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "available", \
                self.name]
         outdata,errdata = util.run_command(cmd)
-        return long(outdata.rstrip())
+        return int(outdata.rstrip())
 
     def create_snapshot(self, snaplabel, recursive = False):
         """
@@ -828,12 +828,12 @@ class ReadWritableDataset(ReadableDataset):
             cmd.append("-r")
         cmd.append("%s@%s" % (self.name, snaplabel))
         outdata,errdata = util.run_command(cmd, False)
-       if errdata:
-         print errdata
+        if errdata:
+            print(errdata)
         self.datasets.refresh_snapshots()
 
     def list_children(self):
-        
+
         # Note, if more dataset types ever come around they will
         # need to be added to the filsystem,volume args below.
         # Not for the forseeable future though.
@@ -851,9 +851,9 @@ class ReadWritableDataset(ReadableDataset):
         """
        List pattern matching snapshots sorted by creation date.
        Oldest listed first
-           
+
         Keyword arguments:
-        pattern -- Filter according to pattern (default None)   
+        pattern -- Filter according to pattern (default None)
         """
         # If there isn't a list of snapshots for this dataset
         # already, create it now and store it in order to save
@@ -1001,20 +1001,20 @@ def list_zpools():
 
 if __name__ == "__main__":
     for zpool in list_zpools():
         pool = ZPool(zpool)
-        print pool
+        print(pool)
         for filesys,mountpoint in pool.list_filesystems():
             fs = Filesystem(filesys, mountpoint)
-            print fs
-            print "\tSnapshots:"
+            print(fs)
+            print("\tSnapshots:")
             for snapshot, snaptime in fs.list_snapshots():
                 snap = Snapshot(snapshot, snaptime)
-                print "\t\t" + snap.name
+                print("\t\t" + snap.name)
 
         for volname in pool.list_volumes():
             vol = Volume(volname)
-            print vol
-            print "\tSnapshots:"
+            print(vol)
+            print("\tSnapshots:")
             for snapshot, snaptime in vol.list_snapshots():
                 snap = Snapshot(snapshot, snaptime)
-                print "\t\t" + snap.name
+                print("\t\t" + snap.name)