From e60206af7781c86ddb5d2ef1fcac3f8f8b086ee4 Mon Sep 17 00:00:00 2001
From: Federico Simoncelli <fsimonce@redhat.com>
Date: Fri, 14 Dec 2012 06:42:09 -0500
Subject: [PATCH 20/22] misc: rename safelease to clusterlock

The safelease module now also contains the sanlock implementation and
might soon contain others (e.g. a special lock for local storage
domains); for this reason it has been renamed to the more general name
clusterlock. The safelease implementation also required some cleanup in
order to achieve more uniformity between the locking mechanisms.
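
For illustration only (not part of this change), a minimal sketch of how a
caller can pick either implementation through the now-uniform interface; the
real selection lives in sd.StorageDomain, and makeClusterLock here is a
hypothetical helper:

    import sd
    import clusterlock

    def makeClusterLock(sdUUID, idsPath, leasesPath, domVersion, leaseParams):
        # DOM_SANLOCK_VERS / DOM_SAFELEASE_VERS are the domain version sets
        # used by sd.py; SafeLease still needs the lease tuning parameters,
        # while SANLock ignores them.
        if domVersion in sd.DOM_SANLOCK_VERS:
            return clusterlock.SANLock(sdUUID, idsPath, leasesPath)
        return clusterlock.SafeLease(sdUUID, idsPath, leasesPath, *leaseParams)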

Change-Id: I74070ebb43dd726362900a0746c08b2ee3d6eac7
Signed-off-by: Federico Simoncelli <fsimonce@redhat.com>
Reviewed-on: http://gerrit.ovirt.org/10067
Reviewed-by: Allon Mureinik <amureini@redhat.com>
Reviewed-by: Dan Kenigsberg <danken@redhat.com>
Reviewed-on: http://gerrit.ovirt.org/11463
---
 vdsm.spec.in                                    |   2 +-
 vdsm/API.py                                     |   4 +-
 vdsm/storage/Makefile.am                        |   4 +-
 vdsm/storage/blockSD.py                         |   4 +-
 vdsm/storage/clusterlock.py                     | 251 ++++++++++++++++++++++++
 vdsm/storage/hsm.py                             |  20 +-
 vdsm/storage/imageRepository/formatConverter.py |   6 +-
 vdsm/storage/safelease.py                       | 250 -----------------------
 vdsm/storage/sd.py                              |  12 +-
 vdsm/storage/sp.py                              |  25 ++-
 10 files changed, 289 insertions(+), 289 deletions(-)
 create mode 100644 vdsm/storage/clusterlock.py
 delete mode 100644 vdsm/storage/safelease.py

diff --git a/vdsm.spec.in b/vdsm.spec.in
index dfc2459..8ad4dce 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -685,7 +685,7 @@ exit 0
 %{_datadir}/%{vdsm_name}/storage/resourceFactories.py*
 %{_datadir}/%{vdsm_name}/storage/remoteFileHandler.py*
 %{_datadir}/%{vdsm_name}/storage/resourceManager.py*
-%{_datadir}/%{vdsm_name}/storage/safelease.py*
+%{_datadir}/%{vdsm_name}/storage/clusterlock.py*
 %{_datadir}/%{vdsm_name}/storage/sdc.py*
 %{_datadir}/%{vdsm_name}/storage/sd.py*
 %{_datadir}/%{vdsm_name}/storage/securable.py*
diff --git a/vdsm/API.py b/vdsm/API.py
index 732f8a3..a050a51 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -33,7 +33,7 @@ import configNetwork
 from vdsm import netinfo
 from vdsm import constants
 import storage.misc
-import storage.safelease
+import storage.clusterlock
 import storage.volume
 import storage.sd
 import storage.image
@@ -992,7 +992,7 @@ class StoragePool(APIBase):
     def spmStart(self, prevID, prevLver, enableScsiFencing,
                  maxHostID=None, domVersion=None):
         if maxHostID is None:
-            maxHostID = storage.safelease.MAX_HOST_ID
+            maxHostID = storage.clusterlock.MAX_HOST_ID
         recoveryMode = None   # unused
         return self._irs.spmStart(self._UUID, prevID, prevLver,
                 recoveryMode, enableScsiFencing, maxHostID, domVersion)
diff --git a/vdsm/storage/Makefile.am b/vdsm/storage/Makefile.am
index cff09be..abc1545 100644
--- a/vdsm/storage/Makefile.am
+++ b/vdsm/storage/Makefile.am
@@ -25,6 +25,7 @@ dist_vdsmstorage_PYTHON = \
 	__init__.py \
 	blockSD.py \
 	blockVolume.py \
+	clusterlock.py \
 	devicemapper.py \
 	dispatcher.py \
 	domainMonitor.py \
@@ -35,8 +36,8 @@ dist_vdsmstorage_PYTHON = \
 	hba.py \
 	hsm.py \
 	image.py \
+	iscsiadm.py \
 	iscsi.py \
-        iscsiadm.py \
 	localFsSD.py \
 	lvm.py \
 	misc.py \
@@ -48,7 +49,6 @@ dist_vdsmstorage_PYTHON = \
 	remoteFileHandler.py \
 	resourceFactories.py \
 	resourceManager.py \
-	safelease.py \
 	sdc.py \
 	sd.py \
 	securable.py \
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 61ec996..862e413 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -37,7 +37,7 @@ import misc
 import fileUtils
 import sd
 import lvm
-import safelease
+import clusterlock
 import blockVolume
 import multipath
 import resourceFactories
@@ -63,7 +63,7 @@ log = logging.getLogger("Storage.BlockSD")
 
 # FIXME: Make this calculated from something logical
 RESERVED_METADATA_SIZE = 40 * (2 ** 20)
-RESERVED_MAILBOX_SIZE = MAILBOX_SIZE * safelease.MAX_HOST_ID
+RESERVED_MAILBOX_SIZE = MAILBOX_SIZE * clusterlock.MAX_HOST_ID
 METADATA_BASE_SIZE = 378
 # VG's min metadata threshold is 20%
 VG_MDA_MIN_THRESHOLD = 0.2
diff --git a/vdsm/storage/clusterlock.py b/vdsm/storage/clusterlock.py
new file mode 100644
index 0000000..4525b2f
--- /dev/null
+++ b/vdsm/storage/clusterlock.py
@@ -0,0 +1,251 @@
+#
+# Copyright 2011 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+import os
+import threading
+import logging
+import subprocess
+from contextlib import nested
+import sanlock
+
+import misc
+import storage_exception as se
+from vdsm import constants
+from vdsm.config import config
+
+
+MAX_HOST_ID = 250
+
+# The SDM_LEASE_OFFSET is used by SANLock so that it does not overlap with
+# safelease, in order to preserve the ability to acquire both locks
+# (e.g. during the domain upgrade)
+SDM_LEASE_NAME = 'SDM'
+SDM_LEASE_OFFSET = 512 * 2048
+
+
+class SafeLease(object):
+    log = logging.getLogger("SafeLease")
+
+    lockUtilPath = config.get('irs', 'lock_util_path')
+    lockCmd = config.get('irs', 'lock_cmd')
+    freeLockCmd = config.get('irs', 'free_lock_cmd')
+
+    def __init__(self, sdUUID, idsPath, leasesPath, lockRenewalIntervalSec,
+                 leaseTimeSec, leaseFailRetry, ioOpTimeoutSec):
+        self._lock = threading.Lock()
+        self._sdUUID = sdUUID
+        self._idsPath = idsPath
+        self._leasesPath = leasesPath
+        self.setParams(lockRenewalIntervalSec, leaseTimeSec, leaseFailRetry,
+                       ioOpTimeoutSec)
+
+    def initLock(self):
+        lockUtil = os.path.join(self.lockUtilPath, "safelease")
+        initCommand = [lockUtil, "release", "-f", self._leasesPath, "0"]
+        rc, out, err = misc.execCmd(initCommand, sudo=False,
+                cwd=self.lockUtilPath)
+        if rc != 0:
+            self.log.warn("could not initialise spm lease (%s): %s", rc, out)
+            raise se.ClusterLockInitError()
+
+    def setParams(self, lockRenewalIntervalSec, leaseTimeSec, leaseFailRetry,
+                  ioOpTimeoutSec):
+        self._lockRenewalIntervalSec = lockRenewalIntervalSec
+        self._leaseTimeSec = leaseTimeSec
+        self._leaseFailRetry = leaseFailRetry
+        self._ioOpTimeoutSec = ioOpTimeoutSec
+
+    def getReservedId(self):
+        return 1000
+
+    def acquireHostId(self, hostId, async):
+        self.log.debug("Host id for domain %s successfully acquired (id: %s)",
+                       self._sdUUID, hostId)
+
+    def releaseHostId(self, hostId, async, unused):
+        self.log.debug("Host id for domain %s released successfully (id: %s)",
+                       self._sdUUID, hostId)
+
+    def hasHostId(self, hostId):
+        return True
+
+    def acquire(self, hostID):
+        leaseTimeMs = self._leaseTimeSec * 1000
+        ioOpTimeoutMs = self._ioOpTimeoutSec * 1000
+        with self._lock:
+            self.log.debug("Acquiring cluster lock for domain %s" %
+                    self._sdUUID)
+
+            lockUtil = self.getLockUtilFullPath()
+            acquireLockCommand = subprocess.list2cmdline([
+                lockUtil, "start", self._sdUUID, str(hostID),
+                str(self._lockRenewalIntervalSec), str(self._leasesPath),
+                str(leaseTimeMs), str(ioOpTimeoutMs), str(self._leaseFailRetry)
+            ])
+
+            cmd = [constants.EXT_SETSID, constants.EXT_IONICE, '-c1', '-n0',
+                constants.EXT_SU, misc.IOUSER, '-s', constants.EXT_SH, '-c',
+                acquireLockCommand]
+            (rc, out, err) = misc.execCmd(cmd, cwd=self.lockUtilPath,
+                    sudo=True)
+            if rc != 0:
+                raise se.AcquireLockFailure(self._sdUUID, rc, out, err)
+            self.log.debug("Clustered lock acquired successfully")
+
+    def getLockUtilFullPath(self):
+        return os.path.join(self.lockUtilPath, self.lockCmd)
+
+    def release(self):
+        with self._lock:
+            freeLockUtil = os.path.join(self.lockUtilPath, self.freeLockCmd)
+            releaseLockCommand = [freeLockUtil, self._sdUUID]
+            self.log.info("Releasing cluster lock for domain %s" %
+                    self._sdUUID)
+            (rc, out, err) = misc.execCmd(releaseLockCommand, sudo=False,
+                    cwd=self.lockUtilPath)
+            if rc != 0:
+                self.log.error("Could not release cluster lock "
+                        "rc=%s out=%s, err=%s" % (str(rc), out, err))
+
+            self.log.debug("Cluster lock released successfully")
+
+
+class SANLock(object):
+    log = logging.getLogger("SANLock")
+
+    _sanlock_fd = None
+    _sanlock_lock = threading.Lock()
+
+    def __init__(self, sdUUID, idsPath, leasesPath, *args):
+        self._lock = threading.Lock()
+        self._sdUUID = sdUUID
+        self._idsPath = idsPath
+        self._leasesPath = leasesPath
+        self._sanlockfd = None
+
+    def initLock(self):
+        try:
+            sanlock.init_lockspace(self._sdUUID, self._idsPath)
+            sanlock.init_resource(self._sdUUID, SDM_LEASE_NAME,
+                                  [(self._leasesPath, SDM_LEASE_OFFSET)])
+        except sanlock.SanlockException:
+            self.log.warn("Cannot initialize clusterlock", exc_info=True)
+            raise se.ClusterLockInitError()
+
+    def setParams(self, *args):
+        pass
+
+    def getReservedId(self):
+        return MAX_HOST_ID
+
+    def acquireHostId(self, hostId, async):
+        with self._lock:
+            self.log.info("Acquiring host id for domain %s (id: %s)",
+                          self._sdUUID, hostId)
+
+            try:
+                sanlock.add_lockspace(self._sdUUID, hostId, self._idsPath,
+                                      async=async)
+            except sanlock.SanlockException, e:
+                if e.errno == os.errno.EINPROGRESS:
+                    # if the request is not asynchronous wait for the ongoing
+                    # lockspace operation to complete
+                    if not async and not sanlock.inq_lockspace(
+                            self._sdUUID, hostId, self._idsPath, wait=True):
+                        raise se.AcquireHostIdFailure(self._sdUUID, e)
+                    # else silently continue, the host id has been acquired
+                    # or it's in the process of being acquired (async)
+                elif e.errno != os.errno.EEXIST:
+                    raise se.AcquireHostIdFailure(self._sdUUID, e)
+
+            self.log.debug("Host id for domain %s successfully acquired "
+                           "(id: %s)", self._sdUUID, hostId)
+
+    def releaseHostId(self, hostId, async, unused):
+        with self._lock:
+            self.log.info("Releasing host id for domain %s (id: %s)",
+                          self._sdUUID, hostId)
+
+            try:
+                sanlock.rem_lockspace(self._sdUUID, hostId, self._idsPath,
+                                      async=async, unused=unused)
+            except sanlock.SanlockException, e:
+                if e.errno != os.errno.ENOENT:
+                    raise se.ReleaseHostIdFailure(self._sdUUID, e)
+
+            self.log.debug("Host id for domain %s released successfully "
+                           "(id: %s)", self._sdUUID, hostId)
+
+    def hasHostId(self, hostId):
+        with self._lock:
+            try:
+                return sanlock.inq_lockspace(self._sdUUID,
+                                             hostId, self._idsPath)
+            except sanlock.SanlockException:
+                self.log.debug("Unable to inquire sanlock lockspace "
+                               "status, returning False", exc_info=True)
+                return False
+
+    # The hostId parameter is maintained here only for compatibility with
+    # SafeLease. We could consider removing it in the future, but keeping it
+    # for logging purposes is desirable.
+    def acquire(self, hostId):
+        with nested(self._lock, SANLock._sanlock_lock):
+            self.log.info("Acquiring cluster lock for domain %s (id: %s)",
+                          self._sdUUID, hostId)
+
+            while True:
+                if SANLock._sanlock_fd is None:
+                    try:
+                        SANLock._sanlock_fd = sanlock.register()
+                    except sanlock.SanlockException, e:
+                        raise se.AcquireLockFailure(self._sdUUID, e.errno,
+                                        "Cannot register to sanlock", str(e))
+
+                try:
+                    sanlock.acquire(self._sdUUID, SDM_LEASE_NAME,
+                                    [(self._leasesPath, SDM_LEASE_OFFSET)],
+                                    slkfd=SANLock._sanlock_fd)
+                except sanlock.SanlockException, e:
+                    if e.errno != os.errno.EPIPE:
+                        raise se.AcquireLockFailure(self._sdUUID, e.errno,
+                                        "Cannot acquire cluster lock", str(e))
+                    SANLock._sanlock_fd = None
+                    continue
+
+                break
+
+            self.log.debug("Cluster lock for domain %s successfully acquired "
+                           "(id: %s)", self._sdUUID, hostId)
+
+    def release(self):
+        with self._lock:
+            self.log.info("Releasing cluster lock for domain %s", self._sdUUID)
+
+            try:
+                sanlock.release(self._sdUUID, SDM_LEASE_NAME,
+                                [(self._leasesPath, SDM_LEASE_OFFSET)],
+                                slkfd=SANLock._sanlock_fd)
+            except sanlock.SanlockException, e:
+                raise se.ReleaseLockFailure(self._sdUUID, e)
+
+            self._sanlockfd = None
+            self.log.debug("Cluster lock for domain %s successfully released",
+                           self._sdUUID)
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 62e9f74..8bbe3b8 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -53,7 +53,7 @@ import iscsi
 import misc
 from misc import deprecated
 import taskManager
-import safelease
+import clusterlock
 import storage_exception as se
 from threadLocal import vars
 from vdsm import constants
@@ -528,7 +528,7 @@ class HSM:
 
     @public
     def spmStart(self, spUUID, prevID, prevLVER, recoveryMode, scsiFencing,
-                 maxHostID=safelease.MAX_HOST_ID, domVersion=None,
+                 maxHostID=clusterlock.MAX_HOST_ID, domVersion=None,
                  options=None):
         """
         Starts an SPM.
@@ -845,7 +845,7 @@ class HSM:
         :raises: an :exc:`Storage_Exception.InvalidParameterException` if the
                  master domain is not supplied in the domain list.
         """
-        safeLease = sd.packLeaseParams(
+        leaseParams = sd.packLeaseParams(
             lockRenewalIntervalSec=lockRenewalIntervalSec,
             leaseTimeSec=leaseTimeSec,
             ioOpTimeoutSec=ioOpTimeoutSec,
@@ -853,9 +853,9 @@ class HSM:
         vars.task.setDefaultException(
             se.StoragePoolCreationError(
                 "spUUID=%s, poolName=%s, masterDom=%s, domList=%s, "
-                "masterVersion=%s, safelease params: (%s)" %
+                "masterVersion=%s, clusterlock params: (%s)" %
                 (spUUID, poolName, masterDom, domList, masterVersion,
-                 safeLease)))
+                 leaseParams)))
         misc.validateUUID(spUUID, 'spUUID')
         if masterDom not in domList:
             raise se.InvalidParameterException("masterDom", str(masterDom))
@@ -892,7 +892,7 @@ class HSM:
 
         return sp.StoragePool(
             spUUID, self.taskMng).create(poolName, masterDom, domList,
-                                         masterVersion, safeLease)
+                                         masterVersion, leaseParams)
 
     @public
     def connectStoragePool(self, spUUID, hostID, scsiKey,
@@ -1701,7 +1701,7 @@ class HSM:
         :returns: Nothing ? pool.reconstructMaster return nothing
         :rtype: ?
         """
-        safeLease = sd.packLeaseParams(
+        leaseParams = sd.packLeaseParams(
             lockRenewalIntervalSec=lockRenewalIntervalSec,
             leaseTimeSec=leaseTimeSec,
             ioOpTimeoutSec=ioOpTimeoutSec,
@@ -1710,9 +1710,9 @@ class HSM:
 
         vars.task.setDefaultException(
             se.ReconstructMasterError(
-                "spUUID=%s, masterDom=%s, masterVersion=%s, safelease "
+                "spUUID=%s, masterDom=%s, masterVersion=%s, clusterlock "
                 "params: (%s)" % (spUUID, masterDom, masterVersion,
-                                  safeLease)))
+                                  leaseParams)))
 
         self.log.info("spUUID=%s master=%s", spUUID, masterDom)
 
@@ -1738,7 +1738,7 @@ class HSM:
                 domDict[d] = sd.validateSDDeprecatedStatus(status)
 
         return pool.reconstructMaster(hostId, poolName, masterDom, domDict,
-                                      masterVersion, safeLease)
+                                      masterVersion, leaseParams)
 
     def _logResp_getDeviceList(self, response):
         logableDevs = deepcopy(response)
diff --git a/vdsm/storage/imageRepository/formatConverter.py b/vdsm/storage/imageRepository/formatConverter.py
index 88b053d..0742560 100644
--- a/vdsm/storage/imageRepository/formatConverter.py
+++ b/vdsm/storage/imageRepository/formatConverter.py
@@ -26,7 +26,7 @@ from vdsm import qemuImg
 from storage import sd
 from storage import blockSD
 from storage import image
-from storage import safelease
+from storage import clusterlock
 from storage import volume
 from storage import blockVolume
 from storage import storage_exception as se
@@ -115,8 +115,8 @@ def v3DomainConverter(repoPath, hostId, domain, isMsd):
         domain.setMetadataPermissions()
 
     log.debug("Initializing the new cluster lock for domain %s", domain.sdUUID)
-    newClusterLock = safelease.SANLock(domain.sdUUID, domain.getIdsFilePath(),
-                                       domain.getLeasesFilePath())
+    newClusterLock = clusterlock.SANLock(
+        domain.sdUUID, domain.getIdsFilePath(), domain.getLeasesFilePath())
     newClusterLock.initLock()
 
     log.debug("Acquiring the host id %s for domain %s", hostId, domain.sdUUID)
diff --git a/vdsm/storage/safelease.py b/vdsm/storage/safelease.py
deleted file mode 100644
index 88a4eae..0000000
--- a/vdsm/storage/safelease.py
+++ /dev/null
@@ -1,250 +0,0 @@
-#
-# Copyright 2011 Red Hat, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
-#
-# Refer to the README and COPYING files for full details of the license
-#
-
-import os
-from vdsm.config import config
-import misc
-import subprocess
-import sanlock
-from contextlib import nested
-from vdsm import constants
-import storage_exception as se
-import threading
-import logging
-
-
-MAX_HOST_ID = 250
-
-# The LEASE_OFFSET is used by SANLock to not overlap with safelease in
-# orfer to preserve the ability to acquire both locks (e.g.: during the
-# domain upgrade)
-SDM_LEASE_NAME = 'SDM'
-SDM_LEASE_OFFSET = 512 * 2048
-
-
-class ClusterLock(object):
-    log = logging.getLogger("ClusterLock")
-    lockUtilPath = config.get('irs', 'lock_util_path')
-    lockCmd = config.get('irs', 'lock_cmd')
-    freeLockCmd = config.get('irs', 'free_lock_cmd')
-
-    def __init__(self, sdUUID, idFile, leaseFile,
-            lockRenewalIntervalSec,
-            leaseTimeSec,
-            leaseFailRetry,
-            ioOpTimeoutSec):
-        self._lock = threading.RLock()
-        self._sdUUID = sdUUID
-        self._leaseFile = leaseFile
-        self.setParams(lockRenewalIntervalSec, leaseTimeSec,
-                       leaseFailRetry, ioOpTimeoutSec)
-
-    def initLock(self):
-        lockUtil = os.path.join(self.lockUtilPath, "safelease")
-        initCommand = [lockUtil, "release", "-f", self._leaseFile, "0"]
-        rc, out, err = misc.execCmd(initCommand, sudo=False,
-                cwd=self.lockUtilPath)
-        if rc != 0:
-            self.log.warn("could not initialise spm lease (%s): %s", rc, out)
-            raise se.ClusterLockInitError()
-
-    def setParams(self, lockRenewalIntervalSec,
-                    leaseTimeSec,
-                    leaseFailRetry,
-                    ioOpTimeoutSec):
-        self._lockRenewalIntervalSec = lockRenewalIntervalSec
-        self._leaseTimeSec = leaseTimeSec
-        self._leaseFailRetry = leaseFailRetry
-        self._ioOpTimeoutSec = ioOpTimeoutSec
-
-    def getReservedId(self):
-        return 1000
-
-    def acquireHostId(self, hostId, async):
-        pass
-
-    def releaseHostId(self, hostId, async, unused):
-        pass
-
-    def hasHostId(self, hostId):
-        return True
-
-    def acquire(self, hostID):
-        leaseTimeMs = self._leaseTimeSec * 1000
-        ioOpTimeoutMs = self._ioOpTimeoutSec * 1000
-        with self._lock:
-            self.log.debug("Acquiring cluster lock for domain %s" %
-                    self._sdUUID)
-
-            lockUtil = self.getLockUtilFullPath()
-            acquireLockCommand = subprocess.list2cmdline([lockUtil, "start",
-                self._sdUUID, str(hostID), str(self._lockRenewalIntervalSec),
-                str(self._leaseFile), str(leaseTimeMs), str(ioOpTimeoutMs),
-                str(self._leaseFailRetry)])
-
-            cmd = [constants.EXT_SETSID, constants.EXT_IONICE, '-c1', '-n0',
-                constants.EXT_SU, misc.IOUSER, '-s', constants.EXT_SH, '-c',
-                acquireLockCommand]
-            (rc, out, err) = misc.execCmd(cmd, cwd=self.lockUtilPath,
-                    sudo=True)
-            if rc != 0:
-                raise se.AcquireLockFailure(self._sdUUID, rc, out, err)
-            self.log.debug("Clustered lock acquired successfully")
-
-    def getLockUtilFullPath(self):
-        return os.path.join(self.lockUtilPath, self.lockCmd)
-
-    def release(self):
-        with self._lock:
-            freeLockUtil = os.path.join(self.lockUtilPath, self.freeLockCmd)
-            releaseLockCommand = [freeLockUtil, self._sdUUID]
-            self.log.info("Releasing cluster lock for domain %s" %
-                    self._sdUUID)
-            (rc, out, err) = misc.execCmd(releaseLockCommand, sudo=False,
-                    cwd=self.lockUtilPath)
-            if rc != 0:
-                self.log.error("Could not release cluster lock "
-                        "rc=%s out=%s, err=%s" % (str(rc), out, err))
-
-            self.log.debug("Cluster lock released successfully")
-
-
-class SANLock(object):
-    log = logging.getLogger("SANLock")
-
-    _sanlock_fd = None
-    _sanlock_lock = threading.Lock()
-
-    def __init__(self, sdUUID, idsPath, leasesPath, *args):
-        self._lock = threading.Lock()
-        self._sdUUID = sdUUID
-        self._idsPath = idsPath
-        self._leasesPath = leasesPath
-        self._sanlockfd = None
-
-    def initLock(self):
-        try:
-            sanlock.init_lockspace(self._sdUUID, self._idsPath)
-            sanlock.init_resource(self._sdUUID, SDM_LEASE_NAME,
-                                  [(self._leasesPath, SDM_LEASE_OFFSET)])
-        except sanlock.SanlockException:
-            self.log.warn("Cannot initialize clusterlock", exc_info=True)
-            raise se.ClusterLockInitError()
-
-    def setParams(self, *args):
-        pass
-
-    def getReservedId(self):
-        return MAX_HOST_ID
-
-    def acquireHostId(self, hostId, async):
-        with self._lock:
-            self.log.info("Acquiring host id for domain %s (id: %s)",
-                          self._sdUUID, hostId)
-
-            try:
-                sanlock.add_lockspace(self._sdUUID, hostId, self._idsPath,
-                                      async=async)
-            except sanlock.SanlockException, e:
-                if e.errno == os.errno.EINPROGRESS:
-                    # if the request is not asynchronous wait for the ongoing
-                    # lockspace operation to complete
-                    if not async and not sanlock.inq_lockspace(
-                            self._sdUUID, hostId, self._idsPath, wait=True):
-                        raise se.AcquireHostIdFailure(self._sdUUID, e)
-                    # else silently continue, the host id has been acquired
-                    # or it's in the process of being acquired (async)
-                elif e.errno != os.errno.EEXIST:
-                    raise se.AcquireHostIdFailure(self._sdUUID, e)
-
-            self.log.debug("Host id for domain %s successfully acquired "
-                           "(id: %s)", self._sdUUID, hostId)
-
-    def releaseHostId(self, hostId, async, unused):
-        with self._lock:
-            self.log.info("Releasing host id for domain %s (id: %s)",
-                          self._sdUUID, hostId)
-
-            try:
-                sanlock.rem_lockspace(self._sdUUID, hostId, self._idsPath,
-                                      async=async, unused=unused)
-            except sanlock.SanlockException, e:
-                if e.errno != os.errno.ENOENT:
-                    raise se.ReleaseHostIdFailure(self._sdUUID, e)
-
-            self.log.debug("Host id for domain %s released successfully "
-                           "(id: %s)", self._sdUUID, hostId)
-
-    def hasHostId(self, hostId):
-        with self._lock:
-            try:
-                return sanlock.inq_lockspace(self._sdUUID,
-                                             hostId, self._idsPath)
-            except sanlock.SanlockException:
-                self.log.debug("Unable to inquire sanlock lockspace "
-                               "status, returning False", exc_info=True)
-                return False
-
-    # The hostId parameter is maintained here only for compatibility with
-    # ClusterLock. We could consider to remove it in the future but keeping it
-    # for logging purpose is desirable.
-    def acquire(self, hostId):
-        with nested(self._lock, SANLock._sanlock_lock):
-            self.log.info("Acquiring cluster lock for domain %s (id: %s)",
-                          self._sdUUID, hostId)
-
-            while True:
-                if SANLock._sanlock_fd is None:
-                    try:
-                        SANLock._sanlock_fd = sanlock.register()
-                    except sanlock.SanlockException, e:
-                        raise se.AcquireLockFailure(self._sdUUID, e.errno,
-                                        "Cannot register to sanlock", str(e))
-
-                try:
-                    sanlock.acquire(self._sdUUID, SDM_LEASE_NAME,
-                                    [(self._leasesPath, SDM_LEASE_OFFSET)],
-                                    slkfd=SANLock._sanlock_fd)
-                except sanlock.SanlockException, e:
-                    if e.errno != os.errno.EPIPE:
-                        raise se.AcquireLockFailure(self._sdUUID, e.errno,
-                                        "Cannot acquire cluster lock", str(e))
-                    SANLock._sanlock_fd = None
-                    continue
-
-                break
-
-            self.log.debug("Cluster lock for domain %s successfully acquired "
-                           "(id: %s)", self._sdUUID, hostId)
-
-    def release(self):
-        with self._lock:
-            self.log.info("Releasing cluster lock for domain %s", self._sdUUID)
-
-            try:
-                sanlock.release(self._sdUUID, SDM_LEASE_NAME,
-                                [(self._leasesPath, SDM_LEASE_OFFSET)],
-                                slkfd=SANLock._sanlock_fd)
-            except sanlock.SanlockException, e:
-                raise se.ReleaseLockFailure(self._sdUUID, e)
-
-            self._sanlockfd = None
-            self.log.debug("Cluster lock for domain %s successfully released",
-                           self._sdUUID)
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index 1b11017..dbc1beb 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -31,7 +31,7 @@ import resourceFactories
 from resourceFactories import IMAGE_NAMESPACE, VOLUME_NAMESPACE
 import resourceManager as rm
 from vdsm import constants
-import safelease
+import clusterlock
 import outOfProcess as oop
 from persistentDict import unicodeEncoder, unicodeDecoder
 
@@ -307,12 +307,12 @@ class StorageDomain:
                 DEFAULT_LEASE_PARAMS[DMDK_LEASE_TIME_SEC],
                 DEFAULT_LEASE_PARAMS[DMDK_LEASE_RETRIES],
                 DEFAULT_LEASE_PARAMS[DMDK_IO_OP_TIMEOUT_SEC])
-            self._clusterLock = safelease.ClusterLock(self.sdUUID,
-                    self.getIdsFilePath(), self.getLeasesFilePath(),
-                    *leaseParams)
+            self._clusterLock = clusterlock.SafeLease(
+                self.sdUUID, self.getIdsFilePath(), self.getLeasesFilePath(),
+                *leaseParams)
         elif domversion in DOM_SANLOCK_VERS:
-            self._clusterLock = safelease.SANLock(self.sdUUID,
-                    self.getIdsFilePath(), self.getLeasesFilePath())
+            self._clusterLock = clusterlock.SANLock(
+                self.sdUUID, self.getIdsFilePath(), self.getLeasesFilePath())
         else:
             raise se.UnsupportedDomainVersion(domversion)
 
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 40d15b3..e13d088 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -494,7 +494,7 @@ class StoragePool(Securable):
             return config.getint("irs", "maximum_domains_in_pool")
 
     @unsecured
-    def _acquireTemporaryClusterLock(self, msdUUID, safeLease):
+    def _acquireTemporaryClusterLock(self, msdUUID, leaseParams):
         try:
             # Master domain is unattached and all changes to unattached domains
             # must be performed under storage lock
@@ -504,7 +504,7 @@ class StoragePool(Securable):
             # assigned id for this pool
             self.id = msd.getReservedId()
 
-            msd.changeLeaseParams(safeLease)
+            msd.changeLeaseParams(leaseParams)
 
             msd.acquireHostId(self.id)
 
@@ -527,7 +527,7 @@ class StoragePool(Securable):
         self.id = SPM_ID_FREE
 
     @unsecured
-    def create(self, poolName, msdUUID, domList, masterVersion, safeLease):
+    def create(self, poolName, msdUUID, domList, masterVersion, leaseParams):
         """
         Create new storage pool with single/multiple image data domain.
         The command will create new storage pool meta-data attach each
@@ -537,10 +537,9 @@ class StoragePool(Securable):
          'msdUUID' - master domain of this pool (one of domList)
          'domList' - list of domains (i.e sdUUID,sdUUID,...,sdUUID)
         """
-        self.log.info("spUUID=%s poolName=%s master_sd=%s "
-                      "domList=%s masterVersion=%s %s",
-                      self.spUUID, poolName, msdUUID,
-                      domList, masterVersion, str(safeLease))
+        self.log.info("spUUID=%s poolName=%s master_sd=%s domList=%s "
+                      "masterVersion=%s %s", self.spUUID, poolName, msdUUID,
+                      domList, masterVersion, leaseParams)
 
         if msdUUID not in domList:
             raise se.InvalidParameterException("masterDomain", msdUUID)
@@ -565,7 +564,7 @@ class StoragePool(Securable):
                     raise se.StorageDomainAlreadyAttached(spUUIDs[0], sdUUID)
 
         fileUtils.createdir(self.poolPath)
-        self._acquireTemporaryClusterLock(msdUUID, safeLease)
+        self._acquireTemporaryClusterLock(msdUUID, leaseParams)
 
         try:
             self._setSafe()
@@ -573,7 +572,7 @@ class StoragePool(Securable):
             # We should do it before actually attaching this domain to the pool.
             # During 'master' marking we create pool metadata and each attached
             # domain should register there
-            self.createMaster(poolName, msd, masterVersion, safeLease)
+            self.createMaster(poolName, msd, masterVersion, leaseParams)
             self.__rebuild(msdUUID=msdUUID, masterVersion=masterVersion)
             # Attach storage domains to the storage pool
             # Since we are creating the pool then attach is done from the hsm and not the spm
@@ -722,10 +721,10 @@ class StoragePool(Securable):
 
     @unsecured
     def reconstructMaster(self, hostId, poolName, msdUUID, domDict,
-                          masterVersion, safeLease):
+                          masterVersion, leaseParams):
         self.log.info("spUUID=%s hostId=%s poolName=%s msdUUID=%s domDict=%s "
                       "masterVersion=%s leaseparams=(%s)", self.spUUID, hostId,
-                      poolName, msdUUID, domDict, masterVersion, str(safeLease))
+                      poolName, msdUUID, domDict, masterVersion, leaseParams)
 
         if msdUUID not in domDict:
             raise se.InvalidParameterException("masterDomain", msdUUID)
@@ -736,7 +735,7 @@ class StoragePool(Securable):
         # For backward compatibility we must support a reconstructMaster
         # that doesn't specify an hostId.
         if not hostId:
-            self._acquireTemporaryClusterLock(msdUUID, safeLease)
+            self._acquireTemporaryClusterLock(msdUUID, leaseParams)
             temporaryLock = True
         else:
             # Forcing to acquire the host id (if it's not acquired already).
@@ -749,7 +748,7 @@ class StoragePool(Securable):
 
         try:
             self.createMaster(poolName, futureMaster, masterVersion,
-                              safeLease)
+                              leaseParams)
 
             for sdUUID in domDict:
                 domDict[sdUUID] = domDict[sdUUID].capitalize()
-- 
1.8.1