*.pyc
*.rpm
*.tar.bz2
+.mypy_cache/
import os
import sys
import subprocess
-import pluginsmf
+from . import pluginsmf
from time_slider import smf, autosnapsmf, util
command = self.smfInst.get_trigger_command()
try:
statinfo = os.stat(command)
- other_x = (statinfo.st_mode & 01)
+ other_x = (statinfo.st_mode & 0o1)
if other_x == 0:
- raise RuntimeError, 'Plugin: %s:\nConfigured trigger command is not ' \
+ raise RuntimeError('Plugin: %s:\nConfigured trigger command is not ' \
'executable:\n%s' \
- % (self.smfInst.instanceName, command)
+ % (self.smfInst.instanceName, command))
except OSError:
- raise RuntimeError, 'Plugin: %s:\nCan not access the configured ' \
+ raise RuntimeError('Plugin: %s:\nCan not access the configured ' \
'plugin/trigger_command:\n%s' \
- % (self.smfInst.instanceName, command)
+ % (self.smfInst.instanceName, command))
def execute(self, schedule, label):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
- except OSError, message:
- raise RuntimeError, "%s subprocess error:\n %s" % \
- (cmd, str(message))
+ except OSError as message:
+ raise RuntimeError("%s subprocess error:\n %s" % \
+ (cmd, str(message)))
self._proc = None
def is_running(self):
err = p.wait()
if err != 0:
self._refreshLock.release()
- raise RuntimeError, '%s failed with exit code %d\n%s' % \
- (str(cmd), err, errdata)
+ raise RuntimeError('%s failed with exit code %d\n%s' % \
+ (str(cmd), err, errdata))
for line in outdata.rstrip().split('\n'):
line = line.rstrip().split()
state = line[0]
try:
plugin = Plugin(fmri, self.verbose)
self.plugins.append(plugin)
- except RuntimeError, message:
+ except RuntimeError as message:
sys.stderr.write("Ignoring misconfigured plugin: %s\n" \
% (fmri))
sys.stderr.write("Reason:\n%s\n" % (message))
from bisect import insort, bisect_left
from time_slider import util, zfs, dbussvc, autosnapsmf, timeslidersmf
-import rsyncsmf
+from . import rsyncsmf
# Set to True if SMF property value of "plugin/command" is "true"
not os.path.islink(d)]
for d in dirList:
mtime = os.stat(d).st_mtime
- insort(self._backups, [long(mtime), os.path.abspath(d)])
+ insort(self._backups, [int(mtime), os.path.abspath(d)])
self._backupTimes[dirName][d] = mtime
def _find_backup_device(self):
lockFile = os.path.join(lockFileDir, tail)
if not os.path.exists(lockFileDir):
- os.makedirs(lockFileDir, 0755)
+ os.makedirs(lockFileDir, 0o755)
try:
lockFp = open(lockFile, 'w')
fcntl.flock(lockFp, fcntl.LOCK_EX | fcntl.LOCK_NB)
trashDir = os.path.join(trash, tail)
if not os.path.exists(trash):
- os.makedirs(trash, 0755)
+ os.makedirs(trash, 0o755)
util.debug("Deleting rsync backup to recover space: %s"\
% (dirName), self._verbose)
self._bus.rsync_started(self._rsyncBaseDir)
ctime,snapName = self._currentQueueSet[0]
- snapshot = zfs.Snapshot(snapName, long(ctime))
+ snapshot = zfs.Snapshot(snapName, int(ctime))
# Make sure the snapshot didn't get destroyed since we last
# checked it.
remainingList = self._currentQueueSet[1:]
dirList = []
if not os.path.exists(partialDir):
- os.makedirs(partialDir, 0755)
+ os.makedirs(partialDir, 0o755)
if not os.path.exists(logDir):
- os.makedirs(logDir, 0755)
+ os.makedirs(logDir, 0o755)
if not os.path.exists(targetDir):
- os.makedirs(targetDir, 0755)
+ os.makedirs(targetDir, 0o755)
# Add the new directory to our internal
# mtime dictionary and sorted list.
self._backupTimes[targetDir] = {}
insort(self._backupDirs, targetDir)
else:
- for name,value in self._backupTimes[targetDir].items():
+ for name,value in list(self._backupTimes[targetDir].items()):
if ctime > value:
if nearestOlder == None or \
value > nearestOlder[1]:
link + ".lock")
if not os.path.exists(lockFileDir):
- os.makedirs(lockFileDir, 0755)
+ os.makedirs(lockFileDir, 0o755)
try:
lockFp = open(lockFile, 'w')
# Set umask temporarily so that rsync backups are read-only to
# the owner by default. Rync will override this to match the
# permissions of each snapshot as appropriate.
- origmask = os.umask(0222)
+ origmask = os.umask(0o222)
util.debug("Starting rsync backup of '%s' to: %s" \
% (sourceDir, partialDir),
self._verbose)
# they match the snapshot creation time. This is extremely important
# because the backup mechanism relies on it to determine backup times
# and nearest matches for incremental rsync (linkDest)
- os.utime(backupDir, (long(ctime), long(ctime)))
+ os.utime(backupDir, (int(ctime), int(ctime)))
# Update the dictionary and time sorted list with ctime also
- self._backupTimes[targetDir][snapshot.snaplabel] = long(ctime)
- insort(self._backups, [long(ctime), os.path.abspath(backupDir)])
+ self._backupTimes[targetDir][snapshot.snaplabel] = int(ctime)
+ insort(self._backups, [int(ctime), os.path.abspath(backupDir)])
snapshot.set_user_property(self._propName, "completed")
snapshot.release(self._propName)
self._currentQueueSet = remainingList
snapshot.fsname,
rsyncsmf.RSYNCTRASHSUFFIX)
if not os.path.exists(trash):
- os.makedirs(trash, 0755)
+ os.makedirs(trash, 0o755)
for mtime,dirName in purgeList:
trashDir = os.path.join(trash,
dirName)
dirName + ".lock")
if not os.path.exists(lockFileDir):
- os.makedirs(lockFileDir, 0755)
+ os.makedirs(lockFileDir, 0o755)
try:
lockFp = open(lockFile, 'w')
outdata,errdata = util.run_command(cmd)
for line in outdata.rstrip().split('\n'):
ctimeStr,name = line.split()
- insort(sortsnaplist, tuple((long(ctimeStr), name)))
+ insort(sortsnaplist, tuple((int(ctimeStr), name)))
sortsnaplist.reverse()
return sortsnaplist
lockFileDir = os.path.normpath(tempfile.gettempdir() + '/' + \
".time-slider")
if not os.path.exists(lockFileDir):
- os.makedirs(lockFileDir, 0755)
+ os.makedirs(lockFileDir, 0o755)
lockFile = os.path.join(lockFileDir, 'rsync-backup.lock')
lockFp = open(lockFile, 'w')
import subprocess
import syslog
-import rsyncsmf
+from . import rsyncsmf
from time_slider import util, smf, zfs
# Set to True if SMF property value of "plugin/command" is "true"
# Set to True if SMF property value of "plugin/command" is "true"
verboseprop = "plugin/verbose"
propbasename = "org.opensolaris:time-slider-plugin"
-print _("Do I work?")
+print(_("Do I work?"))
def main(argv):
# Check to see if the receive command is accessible and executable
try:
statinfo = os.stat(recvcmd[0])
- other_x = (statinfo.st_mode & 01)
+ other_x = (statinfo.st_mode & 0o1)
if other_x == 0:
log_error(syslog.LOG_ERR,
"Plugin: %s: Configured receive/command is not " \
senderrno = sendP.wait()
if senderrno != 0:
- raise RuntimeError, "Send command: %s failed with exit code" \
+ raise RuntimeError("Send command: %s failed with exit code" \
"%d. Error message: \n%s" \
- % (str(sendcmd), senderrno, senderr)
+ % (str(sendcmd), senderrno, senderr))
if recverrno != 0:
- raise RuntimeError, "Receive command %s failed with exit " \
+ raise RuntimeError("Receive command %s failed with exit " \
"code %d. Error message: \n%s" \
- % (str(recvcmd), recverrno, recverr)
+ % (str(recvcmd), recverrno, recverr))
if prevsnapname != None:
util.debug("Releasing hold on %s" % (prevsnapname), verbose)
% (prevsnapname),
verbose)
snapshot.release(propname)
- except Exception, message:
+ except Exception as message:
log_error(syslog.LOG_ERR,
"Error during snapshot send/receive operation: %s" \
% (message))
gettext.textdomain(GETTEXT_DOMAIN)
# register the gettext function for the whole interpreter as "_"
-import __builtin__
-__builtin__._ = gettext.gettext
+import builtins
+builtins._ = gettext.gettext
try:
mainloop.run()
except:
- print "Exiting"
+ print("Exiting")
if __name__ == '__main__':
main()
#
import threading
-import smf
-import util
+from . import smf
+from . import util
factoryDefaultSchedules = ("monthly", "weekly", "daily", "hourly", "frequent")
period = int(self.get_prop(ZFSPROPGROUP, "period"))
keep = int(self.get_prop(ZFSPROPGROUP, "keep"))
- except OSError, message:
- raise RuntimeError, "%s subprocess error:\n %s" % \
- (cmd, str(message))
+ except OSError as message:
+ raise RuntimeError("%s subprocess error:\n %s" % \
+ (cmd, str(message)))
finally:
_scheddetaillock.release()
instance = AutoSnap(s)
try:
_defaultSchedules.append(instance.get_schedule_details())
- except RuntimeError, message:
- raise RuntimeError, "Error getting schedule details for " + \
+ except RuntimeError as message:
+ raise RuntimeError("Error getting schedule details for " + \
"default auto-snapshot SMF instance:" + \
"\n\t" + instanceName + "\nDetails:\n" + \
- str(message)
+ str(message))
return _defaultSchedules
def get_custom_schedules():
instance = AutoSnap(label)
try:
_customSchedules.append(instance.get_schedule_details())
- except RuntimeError, message:
- raise RuntimeError, "Error getting schedule details " + \
+ except RuntimeError as message:
+ raise RuntimeError("Error getting schedule details " + \
"for custom auto-snapshot SMF " + \
"instance:\n\t" + label + "\n" + \
- "Details:\n" + str(message)
+ "Details:\n" + str(message))
return _customSchedules
defaults = get_default_schedules()
for sched in defaults:
S = AutoSnap(sched[0])
- print S.get_schedule_details()
+ print(S.get_schedule_details())
gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
gtk.glade.textdomain(GETTEXT_DOMAIN)
-import zfs
-from rbac import RBACprofile
+from . import zfs
+from .rbac import RBACprofile
class RsyncBackup:
self.creationtime = creationtime
try:
tm = time.localtime(self.creationtime)
-        self.creationtime_str = unicode(time.strftime ("%c", tm),
-                     locale.getpreferredencoding()).encode('utf-8')
+        # py3: strftime already returns str; str(s, enc) on a str raises TypeError
+        self.creationtime_str = time.strftime("%c", tm)
except:
self.creationtime_str = time.ctime(self.creationtime)
rsyncsmf.RSYNCLOCKSUFFIX)
if not os.path.exists(lockFileDir):
- os.makedirs(lockFileDir, 0755)
+ os.makedirs(lockFileDir, 0o755)
lockFile = os.path.join(lockFileDir, self.snaplabel + ".lock")
try:
lockFp = open(lockFile, 'w')
fcntl.flock(lockFp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
- raise RuntimeError, \
- "couldn't delete %s, already used by another process" % self.mountpoint
+ raise RuntimeError("couldn't delete %s, already used by another process" % self.mountpoint)
return
trashDir = os.path.join(self.rsync_dir,
self.fsname,
rsyncsmf.RSYNCTRASHSUFFIX)
if not os.path.exists(trashDir):
- os.makedirs(trashDir, 0755)
+ os.makedirs(trashDir, 0o755)
backupTrashDir = os.path.join (self.rsync_dir,
self.fsname,
def initialise_view(self):
if len(self.shortcircuit) == 0:
# Set TreeViews
- self.liststorefs = gtk.ListStore(str, str, str, str, str, long,
+ self.liststorefs = gtk.ListStore(str, str, str, str, str, int,
gobject.TYPE_PYOBJECT)
list_filter = self.liststorefs.filter_new()
list_sort = gtk.TreeModelSort(list_filter)
for snapshot in newlist:
try:
tm = time.localtime(snapshot.get_creation_time())
-            t = unicode(time.strftime ("%c", tm),
-                     locale.getpreferredencoding()).encode('utf-8')
+            # py3: strftime already returns str; str(s, enc) on a str raises TypeError
+            t = time.strftime("%c", tm)
except:
t = time.ctime(snapshot.get_creation_time())
if backup.exists():
try:
backup.destroy ()
- except RuntimeError, inst:
+ except RuntimeError as inst:
self.errors.append(str(inst))
deleted += 1
self.progress = deleted / (total * 1.0)
"administrative priviliges."
"\n\nConsult your system administrator "))
dialog.run()
- print argv + "is not a valid executable path"
+ print(argv + "is not a valid executable path")
sys.exit(1)
COLUMN_STRING_DATE,
COLUMN_DATE,
COLUMN_SIZE
-) = range (5)
+) = list(range(5))
#
import time_slider.autosnapsmf as base
-import smf
-from timesliderconfig import Config
+from . import smf
+from .timesliderconfig import Config
SNAPLABELPREFIX = base.SNAPLABELPREFIX
instance = AutoSnap(s)
try:
_defaultSchedules.append(instance.get_schedule_details())
- except RuntimeError, message:
- raise RuntimeError, "Error getting schedule details for " + \
+ except RuntimeError as message:
+ raise RuntimeError("Error getting schedule details for " + \
"default auto-snapshot SMF instance:" + \
"\n\t" + instanceName + "\nDetails:\n" + \
- str(message)
+ str(message))
return _defaultSchedules
instance = AutoSnap(label)
try:
_customSchedules.append(instance.get_schedule_details())
- except RuntimeError, message:
- raise RuntimeError, "Error getting schedule details " + \
+ except RuntimeError as message:
+ raise RuntimeError("Error getting schedule details " + \
"for custom auto-snapshot SMF " + \
"instance:\n\t" + label + "\n" + \
- "Details:\n" + str(message)
+ "Details:\n" + str(message))
return _customSchedules
class AutoSnap(base.AutoSnap):
defaults = get_default_schedules()
for sched in defaults:
S = AutoSnap(sched[0])
- print S.get_schedule_details()
+ print(S.get_schedule_details())
if __name__ == "__main__":
rbac = RBACprofile()
- print rbac.name
- print rbac.uid
- print rbac.profiles
- print rbac.auths
+ print(rbac.name)
+ print(rbac.uid)
+ print(rbac.profiles)
+ print(rbac.auths)
# CDDL HEADER END
#
-import timesliderconfig
+from . import timesliderconfig
import time_slider.smf as base
class SMFInstance(base.SMFInstance):
if __name__ == "__main__":
S = SMFInstance('svc:/application/time-slider')
- print S
+ print(S)
# CDDL HEADER END
#
-import ConfigParser
+import configparser
import sys
import time_slider.util as util
},
}
-class MyConfigParser(ConfigParser.ConfigParser):
+class MyConfigParser(configparser.ConfigParser):
def __init__(self):
- ConfigParser.ConfigParser.__init__(self)
+ configparser.ConfigParser.__init__(self)
- for section, content in default_properties.iteritems():
+ for section, content in default_properties.items():
if not self.has_section(section):
self.add_section(section)
- for k,v in content.iteritems():
+ for k,v in content.items():
self.set(section, k, str(v))
class Config:
result = self.config.get(section, option)
util.debug('CONFIG: GET section %s, option %s with value %s\n' % (section, option, result), 1)
return result
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ except (configparser.NoOptionError, configparser.NoSectionError):
util.debug('CONFIG: NOTFOUND section %s, option %s\n' % (section, option), 1)
return ''
#
import time_slider.timeslidersmf as base
-import smf
+from . import smf
import threading
class TimeSliderSMF(base.TimeSliderSMF):
if __name__ == "__main__":
S = TimeSliderSMF('svc:/application/time-slider')
- print S
+ print(S)
import os
import pwd
-import util
+from . import util
class RBACprofile:
if __name__ == "__main__":
rbac = RBACprofile()
- print rbac.name
- print rbac.uid
- print rbac.profiles
- print rbac.auths
+ print(rbac.name)
+ print(rbac.uid)
+ print(rbac.profiles)
+ print(rbac.auths)
import os
import subprocess
import threading
-import util
-import smf
-from autosnapsmf import enable_default_schedules, disable_default_schedules
+from . import util
+from . import smf
+from .autosnapsmf import enable_default_schedules, disable_default_schedules
from os.path import abspath, dirname, join, pardir
sys.path.insert(0, join(dirname(__file__), pardir, "plugin"))
import dbus.service
import dbus.mainloop
import dbus.mainloop.glib
-import dbussvc
+from . import dbussvc
# This is the rough guess ratio used for rsync backup device size
gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
gtk.glade.textdomain(GETTEXT_DOMAIN)
-import zfs
-from timeslidersmf import TimeSliderSMF
-from rbac import RBACprofile
+from . import zfs
+from .timeslidersmf import TimeSliderSMF
+from .rbac import RBACprofile
class FilesystemIntention:
# Initialise SMF service instance state.
try:
self._sliderSMF = TimeSliderSMF()
- except RuntimeError,message:
+ except RuntimeError as message:
self._xml.get_widget("toplevel").set_sensitive(False)
dialog = gtk.MessageDialog(self._xml.get_widget("toplevel"),
0,
rsyncsmf.RSYNCCONFIGFILE)
newKey = generate_random_key()
try:
- origmask = os.umask(0222)
+ origmask = os.umask(0o222)
if not os.path.exists(nodePath):
- os.makedirs(nodePath, 0755)
+ os.makedirs(nodePath, 0o755)
f = open(configPath, 'w')
f.write("target_key=%s\n" % (newKey))
f.close()
self._setupManager.setup_rsync_config()
self._setupManager.setup_services()
self._setupManager.broadcast_changes()
- except RuntimeError, message:
+ except RuntimeError as message:
sys.stderr.write(str(message))
def generate_random_key(length=32):
import subprocess
import threading
-import util
+from . import util
#SMF EXIT CODES
SMF_EXIT_OK = 0
if __name__ == "__main__":
S = SMFInstance('svc:/application/time-slider')
- print S
+ print(S)
gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
gtk.glade.textdomain(GETTEXT_DOMAIN)
-import zfs
-from rbac import RBACprofile
+from . import zfs
+from .rbac import RBACprofile
class SnapshotNowDialog:
"administrative priviliges."
"\n\nConsult your system administrator "))
dialog.run()
- print argv + "is not a valid executable path"
+ print(argv + "is not a valid executable path")
sys.exit(1)
import logging
from logging.handlers import SysLogHandler
-import glib
-import gobject
+from gi.repository import GLib as glib
+from gi.repository import GObject as gobject
import dbus
import dbus.service
import dbus.mainloop
import dbus.mainloop.glib
-import dbussvc
-import zfs
-import smf
+from . import dbussvc
+from . import zfs
+from . import smf
import time_slider.linux.timeslidersmf as timeslidersmf
import time_slider.linux.autosnapsmf as autosnapsmf
# import plugin
from time_slider.linux.rbac import RBACprofile
-import util
+from . import util
import time_slider.linux.timesliderconfig as timesliderconfig
self._smf = timeslidersmf.TimeSliderSMF()
try:
self.verbose = self._smf.get_verbose()
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Error determing whether debugging is enabled")
self.verbose = False
self.exitCode = smf.SMF_EXIT_OK
self.refresh()
- # Seems we're up and running OK.
+ # Seems we're up and running OK.
# Signal our parent so we can daemonise
os.kill(os.getppid(), signal.SIGUSR1)
volume.set_auto_snap(False)
except IndexError:
pass
-
+
nexttime = None
waittime = None
while True:
nexttime = self._check_snapshots()
# Overdue snapshots are already taken automatically
# inside _check_snapshots() so nexttime should never be
- # < 0. It can be None however, which is fine since it
+ # < 0. It can be None however, which is fine since it
# will cause the scheduler thread to sleep indefinitely
# or until a SIGHUP is caught.
if nexttime:
util.debug("Waiting until " + str (nexttime), self.verbose)
waittime = None
if nexttime != None:
- waittime = nexttime - long(time.time())
+ waittime = nexttime - int(time.time())
if (waittime <= 0):
# We took too long and missed a snapshot, so break out
# and catch up on it the next time through the loop
self.verbose)
self._conditionLock.wait(_MINUTE * 15)
- except OSError, message:
+ except OSError as message:
self.logger.error("Caught OSError exception in snapshot" +
" manager thread")
self.logger.error("Error details:\n" + \
self.exitCode = smf.SMF_EXIT_ERR_FATAL
# Exit this thread
break
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Caught RuntimeError exception in snapshot" +
" manager thread")
self.logger.error("Error details:\n" + \
def _configure_svc_props(self):
try:
self.verbose = self._smf.get_verbose()
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Error determing whether debugging is enabled")
self.verbose = False
util.debug("Critical level value is: %d%%" % crit, self.verbose)
emer = self._smf.get_cleanup_level("emergency")
util.debug("Emergency level value is: %d%%" % emer, self.verbose)
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Failed to determine cleanup threshhold levels")
self.logger.error("Details:\n" + \
"--------BEGIN ERROR MESSAGE--------\n" + \
try:
self._keepEmpties = self._smf.get_keep_empties()
- except RuntimeError,message:
+ except RuntimeError as message:
# Not fatal, just assume we delete them (default configuration)
self.logger.error("Can't determine whether to keep empty snapshots")
self.logger.error("Details:\n" + \
self.logger.error("Assuming default value: False")
self._keepEmpties = False
- # Previously, snapshot labels used the ":" character was used as a
+ # Previously, snapshot labels used the ":" character was used as a
# separator character for datestamps. Windows filesystems such as
# CIFS and FAT choke on this character so now we use a user definable
# separator value, with a default value of "_"
else:
self._zpools.append(zpool)
util.debug(str(zpool), self.verbose)
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Could not list Zpools")
self.exitCode = smf.SMF_EXIT_ERR_FATAL
# Propogate exception up to thread's run() method
- raise RuntimeError,message
+ raise RuntimeError(message)
def _rebuild_schedules(self):
try:
_defaultSchedules = autosnapsmf.get_default_schedules()
_customSchedules = autosnapsmf.get_custom_schedules()
- except RuntimeError,message:
+ except RuntimeError as message:
self.exitCode = smf.SMF_EXIT_ERR_FATAL
- raise RuntimeError, "Error reading SMF schedule instances\n" + \
- "Details:\n" + str(message)
+ raise RuntimeError("Error reading SMF schedule instances\n" + \
+ "Details:\n" + str(message))
else:
# Now set it in stone.
self._defaultSchedules = tuple(_defaultSchedules)
self._customSchedules = tuple(_customSchedules)
-
+
# Build the combined schedule tuple from default + custom schedules
_defaultSchedules.extend(_customSchedules)
self._allSchedules = tuple(_defaultSchedules)
last = None
for schedule,interval,period,keep in self._allSchedules:
- # Shortcut if we've already processed this schedule and it's
+ # Shortcut if we've already processed this schedule and it's
# still up to date. Don't skip the default schedules though
# because overlap affects their scheduling
if [schedule,interval,period,keep] not in \
snaps = self._datasets.list_snapshots("%s%s" % \
(self._prefix,
schedule))
- except RuntimeError,message:
+ except RuntimeError as message:
self.exitCode = smf.SMF_EXIT_ERR_FATAL
self.logger.error("Failed to list snapshots during schedule update")
#Propogate up to the thread's run() method
- raise RuntimeError,message
+ raise RuntimeError(message)
if len(snaps) > 0:
util.debug("Last %s snapshot was: %s" % \
snap_tm = time.gmtime(self._last[schedule])
# Increment year if period >= than 1 calender year.
year = snap_tm.tm_year
- year += period / 12
+ year += period // 12
period = period % 12
mon = (snap_tm.tm_mon + period) % 12
mday = snap_tm.tm_mday
if dlastmon > dnewmon and snap_tm.tm_mday > dnewmon:
mday = dnewmon
-
+
tm =(year, mon, mday, \
snap_tm.tm_hour, snap_tm.tm_min, snap_tm.tm_sec, \
0, 0, -1)
def _next_due(self):
schedule = None
earliest = None
- now = long(time.time())
-
+ now = int(time.time())
+
for s,i,p,k in self._defaultSchedules:
due = self._next[s]
if due <= now:
- #Default Schedule - so break out at the first
+ #Default Schedule - so break out at the first
#schedule that is overdue. The subordinate schedules
#will re-adjust afterwards.
earliest,schedule = due,s
self._refreshLock.acquire()
next,schedule = self._next_due()
self._refreshLock.release()
- now = long(time.time())
+ now = int(time.time())
while next != None and next <= now:
label = self._take_snapshots(schedule)
# self._plugin.execute_plugins(schedule, label)
(schedule, dt.isoformat()), \
self.verbose)
return next
-
+
def _take_snapshots(self, schedule):
# Set the time before taking snapshot to avoid clock skew due
# to time taken to complete snapshot.
- tm = long(time.time())
+ tm = int(time.time())
label = "%s%s%s-%s" % \
(autosnapsmf.SNAPLABELPREFIX, self._separator, schedule,
datetime.datetime.now().strftime("%Y-%m-%d-%Hh%M"))
try:
self._datasets.create_auto_snapshot_set(label, tag=schedule)
- except RuntimeError, message:
+ except RuntimeError as message:
# Write an error message, set the exit code and pass it up the
# stack so the thread can terminate
self.logger.error("Failed to create snapshots for schedule: %s" \
% (schedule))
self.exitCode = smf.SMF_EXIT_MON_DEGRADE
- raise RuntimeError,message
+ raise RuntimeError(message)
self._last[schedule] = tm;
self._perform_purge(schedule)
return label
"""Cleans out zero sized snapshots, kind of cautiously"""
# Per schedule: We want to delete 0 sized
# snapshots but we need to keep at least one around (the most
- # recent one) for each schedule so that that overlap is
+ # recent one) for each schedule so that that overlap is
# maintained from frequent -> hourly -> daily etc.
# Start off with the smallest interval schedule first and
# move up. This increases the amount of data retained where
# Clone the list because we want to remove items from it
# while iterating through it.
remainingsnaps = snaps[:]
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Failed to list snapshots during snapshot cleanup")
self.exitCode = smf.SMF_EXIT_ERR_FATAL
- raise RuntimeError,message
+ raise RuntimeError(message)
if (self._keepEmpties == False):
try: # remove the newest one from the list.
for snapname in snaps:
try:
snapshot = zfs.Snapshot(snapname)
- except Exception,message:
+ except Exception as message:
self.logger.error(str(message))
# Not fatal, just skip to the next snapshot
continue
self.verbose)
try:
snapshot.destroy()
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Failed to destroy snapshot: " +
snapname)
self.exitCode = smf.SMF_EXIT_MON_DEGRADE
# Propogate exception so thread can exit
- raise RuntimeError,message
+ raise RuntimeError(message)
remainingsnaps.remove(snapname)
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Can not determine used size of: " + \
snapname)
self.exitCode = smf.SMF_EXIT_MON_DEGRADE
#Propogate the exception to the thead run() method
- raise RuntimeError,message
+ raise RuntimeError(message)
# Deleting individual snapshots instead of recursive sets
# breaks the recursion chain and leaves child snapshots
- # dangling so we need to take care of cleaning up the
+ # dangling so we need to take care of cleaning up the
# snapshots.
target = len(remainingsnaps) - self._keep[schedule]
counter = 0
while counter < target:
util.debug("Destroy expired snapshot: " + \
- remainingsnaps[counter],
+ remainingsnaps[counter],
self.verbose)
try:
snapshot = zfs.Snapshot(remainingsnaps[counter])
- except Exception,message:
+ except Exception as message:
self.logger.error(str(message))
# Not fatal, just skip to the next snapshot
counter += 1
continue
try:
snapshot.destroy()
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Failed to destroy snapshot: " +
snapshot.name)
self.exitCode = smf.SMF_EXIT_ERR_FATAL
# Propogate exception so thread can exit
- raise RuntimeError,message
+ raise RuntimeError(message)
else:
counter += 1
for name in self._datasets.list_auto_snapshot_sets(schedule):
dataset = zfs.ReadWritableDataset(name)
self._prune_snapshots(dataset, schedule)
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Error listing datasets during " + \
"removal of expired snapshots")
self.exitCode = smf.SMF_EXIT_ERR_FATAL
# Propogate up to thread's run() method
- raise RuntimeError,message
+ raise RuntimeError(message)
def _needs_cleanup(self):
if self._remedialCleanup == False:
# Sys admin has explicitly instructed for remedial cleanups
# not to be performed.
return False
- now = long(time.time())
+ now = int(time.time())
# Don't run checks any less than 15 minutes apart.
if self._cleanupLock.acquire(False) == False:
#Indicates that a cleanup is already running.
self.verbose)
self._cleanupLock.release()
return True
- except RuntimeError, message:
+ except RuntimeError as message:
self.logger.error("Error checking zpool capacity of: " + \
zpool.name)
self._cleanupLock.release()
self.exitCode = smf.SMF_EXIT_ERR_FATAL
# Propogate up to thread's run() mehod.
- raise RuntimeError,message
- self._lastCleanupCheck = long(time.time())
+ raise RuntimeError(message)
+ self._lastCleanupCheck = int(time.time())
self._cleanupLock.release()
return False
self._poolstatus[zpool.name] = 4
# This also catches exceptions thrown from _run_<level>_cleanup()
# and _run_cleanup() in methods called by _perform_cleanup()
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Remedial space cleanup failed because " + \
"of failure to determinecapacity of: " + \
zpool.name)
self.exitCode = smf.SMF_EXIT_ERR_FATAL
self._cleanupLock.release()
# Propogate up to thread's run() method.
- raise RuntimeError,message
+ raise RuntimeError(message)
- # Bad - there's no more snapshots left and nothing
+ # Bad - there's no more snapshots left and nothing
# left to delete. We don't disable the service since
# it will permit self recovery and snapshot
# retention when space becomes available on
snapshots = []
try:
clonedsnaps = self._datasets.list_cloned_snapshots()
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Error (non-fatal) listing cloned snapshots" +
" while recovering pool capacity")
self.logger.error("Error details:\n" + \
"--------BEGIN ERROR MESSAGE--------\n" + \
str(message) + \
- "\n--------END ERROR MESSAGE--------")
+ "\n--------END ERROR MESSAGE--------")
# Build a list of snapshots in the given schedule, that are not
# cloned, and sort the result in reverse chronological order.
% (self._prefix,schedule)) \
if not s in clonedsnaps]
snapshots.reverse()
- except RuntimeError,message:
+ except RuntimeError as message:
self.logger.error("Error listing snapshots" +
" while recovering pool capacity")
self.exitCode = smf.SMF_EXIT_ERR_FATAL
# Propogate the error up to the thread's run() method.
- raise RuntimeError,message
-
+ raise RuntimeError(message)
+
while zpool.get_capacity() > threshold:
if len(snapshots) == 0:
self.logger.info( \
% schedule)
return
- """This is not an exact science. Deleteing a zero sized
+ """This is not an exact science. Deleteing a zero sized
snapshot can have unpredictable results. For example a
pair of snapshots may share exclusive reference to a large
amount of data (eg. a large core file). The usage of both
util.debug("Destroying %s" % snapname, self.verbose)
try:
snapshot.destroy()
- except RuntimeError,message:
+ except RuntimeError as message:
# Would be nice to be able to mark service as degraded here
# but it's better to try to continue on rather than to give
# up alltogether (SMF maintenance state)
self._destroyedsnaps.append(snapname)
# Give zfs some time to recalculate.
time.sleep(3)
-
+
def _send_to_syslog(self):
for zpool in self._zpools:
status = self._poolstatus[zpool.name]
self.logger.critical( \
"%s exceeded %d%% capacity. " \
"Weekly, hourly and daily automatic snapshots were destroyed" \
- % (zpool.name, self._criticalLevel))
+ % (zpool.name, self._criticalLevel))
elif status == 1:
self.logger.warning( \
"%s exceeded %d%% capacity. " \
signal.signal(signal.SIGALRM, child_sig_handler)
try:
pid = os.fork()
- except OSError, e:
- raise Exception, "%s [%d]" % (e.strerror, e.errno)
+ except OSError as e:
+ raise Exception("%s [%d]" % (e.strerror, e.errno))
if (pid == 0):
#Reset signals that we set to trap in parent
import subprocess
import threading
-import smf
-import util
+from . import smf
+from . import util
#SMF EXIT CODES
SMF_EXIT_OK = 0
if __name__ == "__main__":
S = TimeSliderSMF('svc:/application/time-slider')
- print S
+ print(S)
if '.time-slider' in dirs:
# dirs.remove('.time-slider')
backupDirs.append(os.path.join(root, ".time-slider/rsync"))
- print "root %s" % root
+ print("root %s" % root)
s1 = root.split ("/ts-test/TIMESLIDER/nanmbp/", 1)
- print s1
+ print(s1)
for dirName in backupDirs:
- print "dirName %s " % dirName
+ print("dirName %s " % dirName)
s1 = dirName.split ("/ts-test/TIMESLIDER/nanmbp/",1)
s2 = s1[1].split ("/.time-slider/rsync",1)
- print s2[0]
+ print(s2[0])
os.chdir(dirName)
dirList = ["toto %s" % d for d in os.listdir(dirName) \
if os.path.isdir(d) and
not os.path.islink(d)]
- print dirList
+ print(dirList)
gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
gtk.glade.textdomain(GETTEXT_DOMAIN)
-import zfs
-from rbac import RBACprofile
+from . import zfs
+from .rbac import RBACprofile
class RsyncBackup:
self.creationtime = creationtime
try:
tm = time.localtime(self.creationtime)
-            self.creationtime_str = unicode(time.strftime ("%c", tm),
-                       locale.getpreferredencoding()).encode('utf-8')
+            self.creationtime_str = time.strftime ("%c", tm)
except:
self.creationtime_str = time.ctime(self.creationtime)
rsyncsmf.RSYNCLOCKSUFFIX)
if not os.path.exists(lockFileDir):
- os.makedirs(lockFileDir, 0755)
+ os.makedirs(lockFileDir, 0o755)
lockFile = os.path.join(lockFileDir, self.snaplabel + ".lock")
try:
lockFp = open(lockFile, 'w')
fcntl.flock(lockFp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
- raise RuntimeError, \
- "couldn't delete %s, already used by another process" % self.mountpoint
+ raise RuntimeError("couldn't delete %s, already used by another process" % self.mountpoint)
return
trashDir = os.path.join(self.rsync_dir,
self.fsname,
rsyncsmf.RSYNCTRASHSUFFIX)
if not os.path.exists(trashDir):
- os.makedirs(trashDir, 0755)
+ os.makedirs(trashDir, 0o755)
backupTrashDir = os.path.join (self.rsync_dir,
self.fsname,
insort(backupDirs, os.path.abspath(backupDir))
-print backupDirs
+print(backupDirs)
import subprocess
import sys
import syslog
-import statvfs
import math
import gio
import logging
Returns a tuple of standard out and stander error.
Throws a RunTimeError if the command failed to execute or
if the command returns a non-zero exit status.
+
+ Assume the output is UTF-8 encoded
"""
debug("Trying to run command %s" % (command), True)
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
- outdata,errdata = p.communicate()
+ outdata,errdata = (x.decode('utf-8') for x in p.communicate())
err = p.wait()
- except OSError, message:
- raise RuntimeError, "%s subprocess error:\n %s" % \
- (command, str(message))
+ except OSError as message:
+ raise RuntimeError("%s subprocess error:\n %s" % \
+ (command, str(message)))
if err != 0 and raise_on_try:
- raise RuntimeError, '%s failed with exit code %d\n%s' % \
- (str(command), err, errdata)
+ raise RuntimeError('%s failed with exit code %d\n%s' % \
+ (str(command), err, errdata))
return outdata,errdata
def debug(message, verbose):
raise ValueError("%s is a non-existent path" % path)
f = os.statvfs(path)
- unavailBlocks = f[statvfs.F_BLOCKS] - f[statvfs.F_BAVAIL]
- capacity = int(math.ceil(100 * (unavailBlocks / float(f[statvfs.F_BLOCKS]))))
+ unavailBlocks = f.f_blocks - f.f_bavail
+ capacity = int(math.ceil(100 * (unavailBlocks / float(f.f_blocks))))
return capacity
if not os.path.exists(path):
raise ValueError("%s is a non-existent path" % path)
f = os.statvfs(path)
- free = long(f[statvfs.F_BAVAIL] * f[statvfs.F_FRSIZE])
-
+ free = int(f.f_bavail * f.f_frsize)
+
return free
def get_used_size(path):
raise ValueError("%s is a non-existent path" % path)
f = os.statvfs(path)
- unavailBlocks = f[statvfs.F_BLOCKS] - f[statvfs.F_BAVAIL]
- used = long(unavailBlocks * f[statvfs.F_FRSIZE])
+ unavailBlocks = f.f_blocks - f.f_bavail
+ used = int(unavailBlocks * f.f_frsize)
return used
if not os.path.exists(path):
raise ValueError("%s is a non-existent path" % path)
f = os.statvfs(path)
- total = long(f[statvfs.F_BLOCKS] * f[statvfs.F_FRSIZE])
+ total = int(f.f_blocks * f.f_frsize)
return total
import threading
from bisect import insort, bisect_left, bisect_right
-import util
+from . import util
BYTESPERMB = 1048576
class Datasets(Exception):
"""
Container class for all zfs datasets. Maintains a centralised
- list of datasets (generated on demand) and accessor methods.
+ list of datasets (generated on demand) and accessor methods.
Also allows clients to notify when a refresh might be necessary.
"""
# Class wide instead of per-instance in order to avoid duplication
filesystems = None
volumes = None
snapshots = None
-
+
# Mutex locks to prevent concurrent writes to above class wide
# dataset lists.
_filesystemslock = threading.Lock()
"""
Create a complete set of snapshots as if this were
for a standard zfs-auto-snapshot operation.
-
+
Keyword arguments:
label:
A label to apply to the snapshot name. Cannot be None.
A string indicating one of the standard auto-snapshot schedules
tags to check (eg. "frequent" for will map to the tag:
com.sun:auto-snapshot:frequent). If specified as a zfs property
- on a zfs dataset, the property corresponding to the tag will
+ on a zfs dataset, the property corresponding to the tag will
override the wildcard property: "com.sun:auto-snapshot"
Default value = None
"""
outdata,errdata = util.run_command(scmd)
for line in outdata.rstrip().split('\n'):
line = line.split()
- # Skip over unset values.
+ # Skip over unset values.
if line[1] == "-":
continue
# Add to everything list. This is used later
line = line.split()
idx = bisect_right(everything, line[0])
if len(everything) == 0 or \
- everything[idx-1] != line[0]:
+ everything[idx-1] != line[0]:
# Dataset is neither included nor excluded so far
if line[1] == "-":
continue
# Now figure out what can be recursively snapshotted and what
# must be singly snapshotted. Single snapshot restrictions apply
# to those datasets who have a child in the excluded list.
- # 'included' is sorted in reverse alphabetical order.
+ # 'included' is sorted in reverse alphabetical order.
for datasetname in included:
excludedchild = False
idx = bisect_right(everything, datasetname)
A string indicating one of the standard auto-snapshot schedules
tags to check (eg. "frequent" will map to the tag:
com.sun:auto-snapshot:frequent). If specified as a zfs property
- on a zfs dataset, the property corresponding to the tag will
+ on a zfs dataset, the property corresponding to the tag will
override the wildcard property: "com.sun:auto-snapshot"
Default value = None
"""
def list_filesystems(self, pattern = None):
"""
List pattern matching filesystems sorted by name.
-
+
Keyword arguments:
pattern -- Filter according to pattern (default None)
"""
"-o", "name,mountpoint", "-s", "name"]
try:
outdata,errdata = util.run_command(cmd, True)
- except OSError, message:
- raise RuntimeError, "%s subprocess error:\n %s" % \
- (cmd, str(message))
+ except OSError as message:
+ raise RuntimeError("%s subprocess error:\n %s" % \
+ (cmd, str(message)))
if err != 0:
Datasets._filesystemslock.release()
- raise RuntimeError, '%s failed with exit code %d\n%s' % \
- (str(cmd), err, errdata)
+ raise RuntimeError('%s failed with exit code %d\n%s' % \
+ (str(cmd), err, errdata))
for line in outdata.rstrip().split('\n'):
line = line.rstrip().split()
Datasets.filesystems.append([line[0], line[1]])
def list_volumes(self, pattern = None):
"""
List pattern matching volumes sorted by name.
-
+
Keyword arguments:
pattern -- Filter according to pattern (default None)
"""
"-o", "name", "-s", "name"]
try:
outdata,errdata = util.run_command(cmd, True)
- except RuntimeError, message:
+ except RuntimeError as message:
Datasets._volumeslock.release()
- raise RuntimeError, str(message)
+ raise RuntimeError(str(message))
for line in outdata.rstrip().split('\n'):
Datasets.volumes.append(line.rstrip())
"""
List pattern matching snapshots sorted by creation date.
Oldest listed first
-
+
Keyword arguments:
pattern -- Filter according to pattern (default None)
"""
cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value,name", "creation"]
try:
outdata,errdata = util.run_command(cmd, True)
- except RuntimeError, message:
+ except RuntimeError as message:
Datasets.snapshotslock.release()
- raise RuntimeError, str(message)
+ raise RuntimeError(str(message))
for dataset in outdata.rstrip().split('\n'):
if re.search("@", dataset):
insort(snaps, dataset.split())
for snap in snaps:
- Datasets.snapshots.append([snap[1], long(snap[0])])
+ Datasets.snapshots.append([snap[1], int(snap[0])])
if pattern == None:
snapshots = Datasets.snapshots[:]
else:
"""
# FIXME in future.
# This is a little sub-optimal because we should be able to modify
- # the snapshot list in place in some situations and regenerate the
+ # the snapshot list in place in some situations and regenerate the
# snapshot list without calling out to zfs(1m). But on the
# pro side, we will pick up any new snapshots since the last
# scan that we would be otherwise unaware of.
outdata,errdata = util.run_command(cmd)
_used,_available = outdata.rstrip().split('\n')
used = float(_used)
- available = float(_available)
+ available = float(_available)
return 100.0 * used/(used + available)
def get_available_size(self):
A string indicating one of the standard auto-snapshot schedules
tags to check (eg. "frequent" will map to the tag:
com.sun:auto-snapshot:frequent). If specified as a zfs property
- on a zfs dataset, the property corresponding to the tag will
+ on a zfs dataset, the property corresponding to the tag will
override the wildcard property: "com.sun:auto-snapshot"
Default value = None
"""
"""
List pattern matching snapshots sorted by creation date.
Oldest listed first
-
+
Keyword arguments:
- pattern -- Filter according to pattern (default None)
+ pattern -- Filter according to pattern (default None)
"""
# If there isn't a list of snapshots for this dataset
# already, create it now and store it in order to save
cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "creation",
self.name]
outdata,errdata = util.run_command(cmd)
- self.__creationTime = long(outdata.rstrip())
+ self.__creationTime = int(outdata.rstrip())
return self.__creationTime
def exists(self):
Returns True if the dataset is still existent on the system.
False otherwise
"""
- # Test existance of the dataset by checking the output of a
+ # Test existance of the dataset by checking the output of a
# simple zfs get command on the snapshot
cmd = [ZFSCMD, "get", "-H", "-o", "name", "type", self.name]
try:
outdata,errdata = util.run_command(cmd)
- except RuntimeError, message:
- raise RuntimeError, str(message)
+ except RuntimeError as message:
+ raise RuntimeError(str(message))
result = outdata.rstrip()
if result == self.name:
def get_used_size(self):
cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "used", self.name]
outdata,errdata = util.run_command(cmd)
- return long(outdata.rstrip())
+ return int(outdata.rstrip())
def get_user_property(self, prop, local=False):
if local == True:
def set_user_property(self, prop, value):
cmd = [ZFSCMD, "set", "%s=%s" % (prop, value), self.name]
outdata,errdata = util.run_command(cmd)
-
+
def unset_user_property(self, prop):
cmd = [ZFSCMD, "inherit", prop, self.name]
outdata,errdata = util.run_command(cmd)
"""
ZFS Snapshot object class.
Provides information and operations specfic to ZFS snapshots
- """
+ """
def __init__(self, name, creation = None):
"""
Keyword arguments:
def __split_snapshot_name(self):
name = self.name.split("@", 1)
# Make sure this is really a snapshot and not a
- # filesystem otherwise a filesystem could get
+ # filesystem otherwise a filesystem could get
# destroyed instead of a snapshot. That would be
# really really bad.
if name[0] == self.name:
"-o", "value", "referenced", \
self.name]
outdata,errdata = util.run_command(cmd)
- return long(outdata.rstrip())
+ return int(outdata.rstrip())
def list_children(self):
"""Returns a recursive list of child snapshots of this snapshot"""
cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "available", \
self.name]
outdata,errdata = util.run_command(cmd)
- return long(outdata.rstrip())
+ return int(outdata.rstrip())
def create_snapshot(self, snaplabel, recursive = False):
"""
cmd.append("-r")
cmd.append("%s@%s" % (self.name, snaplabel))
outdata,errdata = util.run_command(cmd, False)
- if errdata:
- print errdata
+ if errdata:
+ print(errdata)
self.datasets.refresh_snapshots()
def list_children(self):
-
+
# Note, if more dataset types ever come around they will
# need to be added to the filsystem,volume args below.
# Not for the forseeable future though.
"""
List pattern matching snapshots sorted by creation date.
Oldest listed first
-
+
Keyword arguments:
- pattern -- Filter according to pattern (default None)
+ pattern -- Filter according to pattern (default None)
"""
# If there isn't a list of snapshots for this dataset
# already, create it now and store it in order to save
if __name__ == "__main__":
for zpool in list_zpools():
pool = ZPool(zpool)
- print pool
+ print(pool)
for filesys,mountpoint in pool.list_filesystems():
fs = Filesystem(filesys, mountpoint)
- print fs
- print "\tSnapshots:"
+ print(fs)
+ print("\tSnapshots:")
for snapshot, snaptime in fs.list_snapshots():
snap = Snapshot(snapshot, snaptime)
- print "\t\t" + snap.name
+ print("\t\t" + snap.name)
for volname in pool.list_volumes():
vol = Volume(volname)
- print vol
- print "\tSnapshots:"
+ print(vol)
+ print("\tSnapshots:")
for snapshot, snaptime in vol.list_snapshots():
snap = Snapshot(snapshot, snaptime)
- print "\t\t" + snap.name
+ print("\t\t" + snap.name)