path: root/xlators/features/locks/src
author     Pavan Sondur <pavan@gluster.com>        2010-09-30 02:25:31 +0000
committer  Vijay Bellur <vijay@dev.gluster.com>    2010-09-30 11:19:24 -0700
commit     af18c636c44b1ea56296850e55afe0e4b2ce845c (patch)
tree       40f8470ec000b96d61b3f8d53286aa0812c9d921 /xlators/features/locks/src
parent     760daf28898cbb8b5072551735bebee16450ba08 (diff)
protocol/client: cluster/afr: Support lock recovery and self heal.
Signed-off-by: Pavan Vilas Sondur <pavan@gluster.com>
Signed-off-by: Vijay Bellur <vijay@dev.gluster.com>
BUG: 865 (Add locks recovery support in GlusterFS)
URL: http://bugs.gluster.com/cgi-bin/bugzilla3/show_bug.cgi?id=865
Diffstat (limited to 'xlators/features/locks/src')
-rw-r--r--   xlators/features/locks/src/Makefile.am          2
-rw-r--r--   xlators/features/locks/src/common.c             9
-rw-r--r--   xlators/features/locks/src/common.h             14
-rw-r--r--   xlators/features/locks/src/locks-mem-types.h    1
-rw-r--r--   xlators/features/locks/src/locks.h               7
-rw-r--r--   xlators/features/locks/src/posix.c              256
-rw-r--r--   xlators/features/locks/src/reservelk.c          450
7 files changed, 738 insertions(+), 1 deletions(-)
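
The hunks below add a "reserve lock" path to the posix-locks translator: F_RESLK_LCK / F_RESLK_LCKW take, and F_RESLK_UNLCK releases, a reservelk on a byte range, while F_GETLK_FD hands back the locks already held on an fd so a reconnecting client can re-acquire them. A minimal sketch of the recovery sequence this server-side support appears to enable is shown here; the lk commands and the GF_LK_RECLK end-of-list marker come from this patch, but do_lk() is a hypothetical wrapper around the client lk fop and is not part of the change:

/* Hypothetical client-side recovery loop (sketch only). */
static int
recover_locks (fd_t *fd, struct flock *region)
{
        struct flock lk  = {0, };
        int          ret = 0;

        /* 1. Hold a reserve lock on the region so no other client can
         *    grab conflicting locks while the old ones are re-acquired. */
        ret = do_lk (fd, F_RESLK_LCK, region);
        if (ret < 0)
                return ret;

        /* 2. Fetch the locks previously held on this fd, one per
         *    F_GETLK_FD call, until the server marks end-of-list by
         *    setting l_type to GF_LK_RECLK. */
        for (;;) {
                lk  = *region;
                ret = do_lk (fd, F_GETLK_FD, &lk);
                if (ret < 0 || lk.l_type == GF_LK_RECLK)
                        break;

                /* 3. Re-acquire each recovered lock. */
                ret = do_lk (fd, F_SETLK, &lk);
                if (ret < 0)
                        break;
        }

        /* 4. Drop the reserve lock; blocked reservelks and blocked
         *    setlk calls are then granted on the server. */
        do_lk (fd, F_RESLK_UNLCK, region);

        return ret;
}

While the reserve lock is held, an F_SETLK/F_SETLKW on the same range from another owner is parked on the inode's blocked_calls list by pl_verify_reservelk() and is only re-driven by grant_blocked_lock_calls() after the F_RESLK_UNLCK.
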
diff --git a/xlators/features/locks/src/Makefile.am b/xlators/features/locks/src/Makefile.am
index d10b874be..53dd3aa5d 100644
--- a/xlators/features/locks/src/Makefile.am
+++ b/xlators/features/locks/src/Makefile.am
@@ -3,7 +3,7 @@ xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
locks_la_LDFLAGS = -module -avoidversion
-locks_la_SOURCES = common.c posix.c entrylk.c inodelk.c
+locks_la_SOURCES = common.c posix.c entrylk.c inodelk.c reservelk.c
locks_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
noinst_HEADERS = locks.h common.h locks-mem-types.h
diff --git a/xlators/features/locks/src/common.c b/xlators/features/locks/src/common.c
index b34cd9781..9f2d11304 100644
--- a/xlators/features/locks/src/common.c
+++ b/xlators/features/locks/src/common.c
@@ -103,6 +103,12 @@ fd_to_fdnum (fd_t *fd)
return ((unsigned long) fd);
}
+fd_t *
+fd_from_fdnum (posix_lock_t *lock)
+{
+ return ((fd_t *) lock->fd_num);
+}
+
int
__pl_inode_is_empty (pl_inode_t *pl_inode)
{
@@ -441,6 +447,9 @@ pl_inode_get (xlator_t *this, inode_t *inode)
INIT_LIST_HEAD (&pl_inode->dom_list);
INIT_LIST_HEAD (&pl_inode->ext_list);
INIT_LIST_HEAD (&pl_inode->rw_list);
+ INIT_LIST_HEAD (&pl_inode->reservelk_list);
+ INIT_LIST_HEAD (&pl_inode->blocked_reservelks);
+ INIT_LIST_HEAD (&pl_inode->blocked_calls);
inode_ctx_put (inode, this, (uint64_t)(long)(pl_inode));
diff --git a/xlators/features/locks/src/common.h b/xlators/features/locks/src/common.h
index d70729447..c7d817f8d 100644
--- a/xlators/features/locks/src/common.h
+++ b/xlators/features/locks/src/common.h
@@ -116,4 +116,18 @@ pl_trace_release (xlator_t *this, fd_t *fd);
unsigned long
fd_to_fdnum (fd_t *fd);
+fd_t *
+fd_from_fdnum (posix_lock_t *lock);
+
+int
+pl_reserve_setlk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
+ int can_block);
+int
+reservelks_equal (posix_lock_t *l1, posix_lock_t *l2);
+
+int
+pl_verify_reservelk (xlator_t *this, pl_inode_t *pl_inode,
+ posix_lock_t *lock, int can_block);
+int
+pl_reserve_unlock (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *reqlock);
#endif /* __COMMON_H__ */
diff --git a/xlators/features/locks/src/locks-mem-types.h b/xlators/features/locks/src/locks-mem-types.h
index cf5024086..f441f95cf 100644
--- a/xlators/features/locks/src/locks-mem-types.h
+++ b/xlators/features/locks/src/locks-mem-types.h
@@ -33,6 +33,7 @@ enum gf_locks_mem_types_ {
gf_locks_mt_pl_rw_req_t,
gf_locks_mt_posix_locks_private_t,
gf_locks_mt_pl_local_t,
+ gf_locks_mt_pl_fdctx_t,
gf_locks_mt_end
};
#endif
diff --git a/xlators/features/locks/src/locks.h b/xlators/features/locks/src/locks.h
index 60474615e..483e3e666 100644
--- a/xlators/features/locks/src/locks.h
+++ b/xlators/features/locks/src/locks.h
@@ -125,6 +125,9 @@ struct __pl_inode {
struct list_head dom_list; /* list of domains */
struct list_head ext_list; /* list of fcntl locks */
struct list_head rw_list; /* list of waiting r/w requests */
+ struct list_head reservelk_list; /* list of reservelks */
+ struct list_head blocked_reservelks; /* list of blocked reservelks */
+ struct list_head blocked_calls; /* list of blocked lock calls while a reserve is held */
int mandatory; /* if mandatory locking is enabled */
inode_t *refkeeper; /* hold refs on an inode while locks are
@@ -150,4 +153,8 @@ typedef struct {
gf_boolean_t posixlk_count_req;
} pl_local_t;
+typedef struct {
+ struct list_head locks_list;
+} pl_fdctx_t;
+
#endif /* __POSIX_LOCKS_H__ */
diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
index 165280265..f08559424 100644
--- a/xlators/features/locks/src/posix.c
+++ b/xlators/features/locks/src/posix.c
@@ -741,6 +741,194 @@ pl_writev (call_frame_t *frame, xlator_t *this, fd_t *fd,
return 0;
}
+static int
+__fd_has_locks (pl_inode_t *pl_inode, fd_t *fd)
+{
+ int found = 0;
+ posix_lock_t *l = NULL;
+
+ list_for_each_entry (l, &pl_inode->ext_list, list) {
+ if ((l->fd_num == fd_to_fdnum(fd))) {
+ found = 1;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static posix_lock_t *
+lock_dup (posix_lock_t *lock)
+{
+ posix_lock_t *new_lock = NULL;
+
+ new_lock = new_posix_lock (&lock->user_flock, lock->transport,
+ lock->client_pid, lock->owner,
+ (fd_t *)lock->fd_num);
+ return new_lock;
+}
+
+static int
+__dup_locks_to_fdctx (pl_inode_t *pl_inode, fd_t *fd,
+ pl_fdctx_t *fdctx)
+{
+ posix_lock_t *l = NULL;
+ posix_lock_t *duplock = NULL;
+ int ret = 0;
+
+ fdctx = GF_CALLOC (1, sizeof (*fdctx),
+ gf_locks_mt_pl_fdctx_t);
+ if (!fdctx) {
+ ret = -1;
+ goto out;
+ }
+
+ INIT_LIST_HEAD (&fdctx->locks_list);
+
+ list_for_each_entry (l, &pl_inode->ext_list, list) {
+ if ((l->fd_num == fd_to_fdnum(fd))) {
+ duplock = lock_dup (l);
+ if (!duplock) {
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Out of memory");
+ ret = -1;
+ break;
+ }
+
+ list_add_tail (&duplock->list, &fdctx->locks_list);
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int
+__copy_locks_to_fdctx (pl_inode_t *pl_inode, fd_t *fd,
+ pl_fdctx_t *fdctx)
+{
+ int ret = 0;
+
+ ret = __dup_locks_to_fdctx (pl_inode, fd, fdctx);
+ if (ret)
+ goto out;
+
+ ret = fd_ctx_set (fd, THIS, (uint64_t) (unsigned long)&fdctx);
+ if (ret)
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Failed to set fdctx");
+out:
+ return ret;
+
+}
+
+static void
+pl_mark_eol_lock (posix_lock_t *lock)
+{
+ lock->user_flock.l_type = GF_LK_RECLK;
+ return;
+}
+
+static posix_lock_t *
+__get_next_fdctx_lock (pl_fdctx_t *fdctx)
+{
+ posix_lock_t *lock = NULL;
+
+ GF_ASSERT (fdctx);
+
+ if (list_empty (&fdctx->locks_list)) {
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "fdctx lock list empty");
+ goto out;
+ }
+
+ lock = list_entry (&fdctx->locks_list, typeof (*lock),
+ list);
+
+ GF_ASSERT (lock);
+
+ list_del_init (&lock->list);
+
+out:
+ return lock;
+}
+
+static int
+__set_next_lock_fd (pl_fdctx_t *fdctx, posix_lock_t *reqlock)
+{
+ posix_lock_t *lock = NULL;
+ int ret = 0;
+
+ GF_ASSERT (fdctx);
+
+ lock = __get_next_fdctx_lock (fdctx);
+ if (!lock) {
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "marking EOL in reqlock");
+ pl_mark_eol_lock (reqlock);
+ goto out;
+ }
+
+ reqlock->user_flock = lock->user_flock;
+
+out:
+ if (lock)
+ __destroy_lock (lock);
+
+ return ret;
+}
+static int
+pl_getlk_fd (xlator_t *this, pl_inode_t *pl_inode,
+ fd_t *fd, posix_lock_t *reqlock)
+{
+ uint64_t tmp = 0;
+ pl_fdctx_t *fdctx = NULL;
+ int ret = 0;
+
+ pthread_mutex_lock (&pl_inode->mutex);
+ {
+ if (!__fd_has_locks (pl_inode, fd)) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "fd=%p has no active locks", fd);
+ ret = 0;
+ goto unlock;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "There are active locks on fd");
+
+ ret = fd_ctx_get (fd, this, &tmp);
+ fdctx = (pl_fdctx_t *) tmp;
+ if (ret) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "no fdctx -> copying all locks on fd");
+
+ ret = __copy_locks_to_fdctx (pl_inode, fd, fdctx);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Out of memory");
+ goto unlock;
+ }
+
+ ret = __set_next_lock_fd (fdctx, reqlock);
+
+ } else {
+ gf_log (this->name, GF_LOG_TRACE,
+ "fdctx present -> returning the next lock");
+ ret = __set_next_lock_fd (fdctx, reqlock);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "could not get next lock of fd");
+ goto unlock;
+ }
+ }
+ }
+
+unlock:
+ pthread_mutex_unlock (&pl_inode->mutex);
+ return ret;
+
+}
int
pl_lk (call_frame_t *frame, xlator_t *this,
@@ -791,6 +979,68 @@ pl_lk (call_frame_t *frame, xlator_t *this,
switch (cmd) {
+ case F_RESLK_LCKW:
+ can_block = 1;
+
+ /* fall through */
+ case F_RESLK_LCK:
+ memcpy (&reqlock->user_flock, flock, sizeof (struct flock));
+ reqlock->frame = frame;
+ reqlock->this = this;
+
+ ret = pl_reserve_setlk (this, pl_inode, reqlock,
+ can_block);
+ if (ret < 0) {
+ if (can_block)
+ goto out;
+
+ op_ret = -1;
+ op_errno = -ret;
+ __destroy_lock (reqlock);
+ goto unwind;
+ }
+ /* Finally a getlk and return the call */
+ conf = pl_getlk (pl_inode, reqlock);
+ if (conf)
+ posix_lock_to_flock (conf, flock);
+ break;
+
+ case F_RESLK_UNLCK:
+ reqlock->frame = frame;
+ reqlock->this = this;
+ ret = pl_reserve_unlock (this, pl_inode, reqlock);
+ if (ret < 0) {
+ op_ret = -1;
+ op_errno = -ret;
+ }
+ __destroy_lock (reqlock);
+ goto unwind;
+
+ break;
+
+ case F_GETLK_FD:
+ reqlock->frame = frame;
+ reqlock->this = this;
+ ret = pl_verify_reservelk (this, pl_inode, reqlock, can_block);
+ GF_ASSERT (ret >= 0);
+
+ ret = pl_getlk_fd (this, pl_inode, fd, reqlock);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "getting locks on fd failed");
+ op_ret = -1;
+ op_errno = ENOLCK;
+ goto unwind;
+ }
+
+ gf_log (this->name, GF_LOG_TRACE,
+ "Replying with a lock on fd for healing");
+
+ posix_lock_to_flock (reqlock, flock);
+ __destroy_lock (reqlock);
+
+ break;
+
#if F_GETLK != F_GETLK64
case F_GETLK64:
#endif
@@ -816,6 +1066,12 @@ pl_lk (call_frame_t *frame, xlator_t *this,
#endif
case F_SETLK:
memcpy (&reqlock->user_flock, flock, sizeof (struct flock));
+ ret = pl_verify_reservelk (this, pl_inode, reqlock, can_block);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "Lock blocked due to conflicting reserve lock");
+ goto out;
+ }
ret = pl_setlk (this, pl_inode, reqlock,
can_block);
diff --git a/xlators/features/locks/src/reservelk.c b/xlators/features/locks/src/reservelk.c
new file mode 100644
index 000000000..c36484c46
--- /dev/null
+++ b/xlators/features/locks/src/reservelk.c
@@ -0,0 +1,450 @@
+/*
+ Copyright (c) 2006, 2007, 2008 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "glusterfs.h"
+#include "compat.h"
+#include "xlator.h"
+#include "inode.h"
+#include "logging.h"
+#include "common-utils.h"
+#include "list.h"
+
+#include "locks.h"
+#include "common.h"
+
+void
+__delete_reserve_lock (posix_lock_t *lock)
+{
+ list_del (&lock->list);
+}
+
+void
+__destroy_reserve_lock (posix_lock_t *lock)
+{
+ FREE (lock);
+}
+
+/* Return true if the two reservelks have exactly the same lock boundaries */
+int
+reservelks_equal (posix_lock_t *l1, posix_lock_t *l2)
+{
+ if ((l1->fl_start == l2->fl_start) &&
+ (l1->fl_end == l2->fl_end))
+ return 1;
+
+ return 0;
+}
+
+/* Determine if lock is grantable or not */
+static posix_lock_t *
+__reservelk_grantable (pl_inode_t *pl_inode, posix_lock_t *lock)
+{
+ xlator_t *this = NULL;
+ posix_lock_t *l = NULL;
+ posix_lock_t *ret_lock = NULL;
+
+ this = THIS;
+
+ if (list_empty (&pl_inode->reservelk_list)) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "No reservelks in list");
+ goto out;
+ }
+ list_for_each_entry (l, &pl_inode->reservelk_list, list){
+ if (reservelks_equal (lock, l)) {
+ ret_lock = l;
+ break;
+ }
+ }
+out:
+ return ret_lock;
+}
+
+static int
+__same_owner_reservelk (posix_lock_t *l1, posix_lock_t *l2)
+{
+ return ((l1->owner == l2->owner));
+
+}
+
+static posix_lock_t *
+__matching_reservelk (pl_inode_t *pl_inode, posix_lock_t *lock)
+{
+ posix_lock_t *l = NULL;
+
+ if (list_empty (&pl_inode->reservelk_list)) {
+ gf_log ("posix-locks", GF_LOG_TRACE,
+ "reservelk list empty");
+ return NULL;
+ }
+
+ list_for_each_entry (l, &pl_inode->reservelk_list, list) {
+ if (reservelks_equal (l, lock)) {
+ gf_log ("posix-locks", GF_LOG_TRACE,
+ "equal reservelk found");
+ break;
+ }
+ }
+
+ return l;
+}
+
+static int
+__reservelk_conflict (xlator_t *this, pl_inode_t *pl_inode,
+ posix_lock_t *lock)
+{
+ posix_lock_t *conf = NULL;
+ int ret = 0;
+
+ conf = __matching_reservelk (pl_inode, lock);
+ if (conf) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "Matching reservelk found");
+ if (__same_owner_reservelk (lock, conf)) {
+ list_del_init (&conf->list);
+ gf_log (this->name, GF_LOG_TRACE,
+ "Removing the matching reservelk for setlk to progress");
+ FREE (conf);
+ ret = 0;
+ } else {
+ gf_log (this->name, GF_LOG_TRACE,
+ "Conflicting reservelk found");
+ ret = 1;
+ }
+
+ }
+ return ret;
+
+}
+
+int
+pl_verify_reservelk (xlator_t *this, pl_inode_t *pl_inode,
+ posix_lock_t *lock, int can_block)
+{
+ int ret = 0;
+
+ pthread_mutex_lock (&pl_inode->mutex);
+ {
+ if (__reservelk_conflict (this, pl_inode, lock)) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "Found conflicting reservelk. Blocking until reservelk is unlocked.");
+ lock->blocked = can_block;
+ list_add_tail (&lock->list, &pl_inode->blocked_calls);
+ ret = -1;
+ goto unlock;
+ }
+
+ gf_log (this->name, GF_LOG_TRACE,
+ "no conflicting reservelk found. Call continuing");
+ ret = 0;
+
+ }
+unlock:
+ pthread_mutex_unlock (&pl_inode->mutex);
+
+ return ret;
+
+}
+
+
+/* Determines if lock can be granted and adds the lock. If the lock
+ * is blocking, adds it to the blocked_reservelks.
+ */
+static int
+__lock_reservelk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
+ int can_block)
+{
+ posix_lock_t *conf = NULL;
+ int ret = -EINVAL;
+
+ conf = __reservelk_grantable (pl_inode, lock);
+ if (conf){
+ ret = -EAGAIN;
+ if (can_block == 0)
+ goto out;
+
+ list_add_tail (&lock->list, &pl_inode->blocked_reservelks);
+
+ gf_log (this->name, GF_LOG_TRACE,
+ "%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => Blocked",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+ lock->client_pid,
+ lock->owner,
+ lock->user_flock.l_start,
+ lock->user_flock.l_len);
+
+
+ goto out;
+ }
+
+ list_add (&lock->list, &pl_inode->reservelk_list);
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static posix_lock_t *
+find_matching_reservelk (posix_lock_t *lock, pl_inode_t *pl_inode)
+{
+ posix_lock_t *l = NULL;
+ list_for_each_entry (l, &pl_inode->reservelk_list, list) {
+ if (reservelks_equal (l, lock))
+ return l;
+ }
+ return NULL;
+}
+
+/* An F_UNLCK request removes the reservelk whose lock boundaries exactly
+ * match those of the unlock request. If no such lock is found, NULL is returned.
+ */
+static posix_lock_t *
+__reserve_unlock_lock (xlator_t *this, posix_lock_t *lock, pl_inode_t *pl_inode)
+{
+
+ posix_lock_t *conf = NULL;
+
+ conf = find_matching_reservelk (lock, pl_inode);
+ if (!conf) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ " Matching lock not found for unlock");
+ goto out;
+ }
+ __delete_reserve_lock (conf);
+ gf_log (this->name, GF_LOG_DEBUG,
+ " Matching lock found for unlock");
+
+out:
+ return conf;
+
+
+}
+
+static void
+__grant_blocked_reserve_locks (xlator_t *this, pl_inode_t *pl_inode,
+ struct list_head *granted)
+{
+ int bl_ret = 0;
+ posix_lock_t *bl = NULL;
+ posix_lock_t *tmp = NULL;
+
+ struct list_head blocked_list;
+
+ INIT_LIST_HEAD (&blocked_list);
+ list_splice_init (&pl_inode->blocked_reservelks, &blocked_list);
+
+ list_for_each_entry_safe (bl, tmp, &blocked_list, list) {
+
+ list_del_init (&bl->list);
+
+ bl_ret = __lock_reservelk (this, pl_inode, bl, 1);
+
+ if (bl_ret == 0) {
+ list_add (&bl->list, granted);
+ }
+ }
+ return;
+}
+
+/* Grant all reservelks blocked on lock(s) */
+void
+grant_blocked_reserve_locks (xlator_t *this, pl_inode_t *pl_inode)
+{
+ struct list_head granted;
+ posix_lock_t *lock = NULL;
+ posix_lock_t *tmp = NULL;
+
+ INIT_LIST_HEAD (&granted);
+
+ if (list_empty (&pl_inode->blocked_reservelks)) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "No blocked locks to be granted");
+ return;
+ }
+
+ pthread_mutex_lock (&pl_inode->mutex);
+ {
+ __grant_blocked_reserve_locks (this, pl_inode, &granted);
+ }
+ pthread_mutex_unlock (&pl_inode->mutex);
+
+ list_for_each_entry_safe (lock, tmp, &granted, list) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => Granted",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+ lock->client_pid,
+ lock->owner,
+ lock->user_flock.l_start,
+ lock->user_flock.l_len);
+
+ STACK_UNWIND_STRICT (lk, lock->frame, 0, 0, &lock->user_flock);
+ }
+
+}
+
+static void
+__grant_blocked_lock_calls (xlator_t *this, pl_inode_t *pl_inode,
+ struct list_head *granted)
+{
+ int bl_ret = 0;
+ posix_lock_t *bl = NULL;
+ posix_lock_t *tmp = NULL;
+
+ struct list_head blocked_list;
+
+ INIT_LIST_HEAD (&blocked_list);
+ list_splice_init (&pl_inode->blocked_reservelks, &blocked_list);
+
+ list_for_each_entry_safe (bl, tmp, &blocked_list, list) {
+
+ list_del_init (&bl->list);
+
+ bl_ret = pl_verify_reservelk (this, pl_inode, bl, bl->blocked);
+
+ if (bl_ret == 0) {
+ list_add_tail (&bl->list, granted);
+ }
+ }
+ return;
+}
+
+void
+grant_blocked_lock_calls (xlator_t *this, pl_inode_t *pl_inode)
+{
+ struct list_head granted;
+ posix_lock_t *lock = NULL;
+ posix_lock_t *tmp = NULL;
+ fd_t *fd = NULL;
+
+ int can_block = 0;
+ int32_t cmd = 0;
+ int ret = 0;
+
+ if (list_empty (&pl_inode->blocked_calls)) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "No blocked lock calls to be granted");
+ return;
+ }
+
+ pthread_mutex_lock (&pl_inode->mutex);
+ {
+ __grant_blocked_lock_calls (this, pl_inode, &granted);
+ }
+ pthread_mutex_unlock (&pl_inode->mutex);
+
+ list_for_each_entry_safe (lock, tmp, &granted, list) {
+ fd = fd_from_fdnum (lock);
+
+ if (lock->blocked) {
+ can_block = 1;
+ cmd = F_SETLKW;
+ }
+ else
+ cmd = F_SETLK;
+
+ lock->blocked = 0;
+ ret = pl_setlk (this, pl_inode, lock, can_block);
+ if (ret == -1) {
+ if (can_block) {
+ pl_trace_block (this, lock->frame, fd, NULL,
+ cmd, &lock->user_flock, NULL);
+ continue;
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG, "returning EAGAIN");
+ pl_trace_out (this, lock->frame, fd, NULL, cmd,
+ &lock->user_flock, -1, EAGAIN, NULL);
+ pl_update_refkeeper (this, fd->inode);
+ STACK_UNWIND_STRICT (lk, lock->frame, -1, EAGAIN, &lock->user_flock);
+ __destroy_lock (lock);
+ }
+ }
+
+ }
+
+}
+
+
+int
+pl_reserve_unlock (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock)
+{
+ posix_lock_t *retlock = NULL;
+ int ret = -1;
+
+ pthread_mutex_lock (&pl_inode->mutex);
+ {
+ retlock = __reserve_unlock_lock (this, lock, pl_inode);
+ if (!retlock) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Bad Unlock issued on Inode lock");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_TRACE,
+ "Reservelk Unlock successful");
+ __destroy_reserve_lock (retlock);
+ ret = 0;
+ }
+out:
+ pthread_mutex_unlock (&pl_inode->mutex);
+
+ grant_blocked_reserve_locks (this, pl_inode);
+ grant_blocked_lock_calls (this, pl_inode);
+
+ return ret;
+
+}
+
+int
+pl_reserve_setlk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
+ int can_block)
+{
+ int ret = -EINVAL;
+
+ pthread_mutex_lock (&pl_inode->mutex);
+ {
+
+ ret = __lock_reservelk (this, pl_inode, lock, can_block);
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_TRACE,
+ "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => NOK",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+ lock->client_pid,
+ lock->owner,
+ lock->user_flock.l_start,
+ lock->user_flock.l_len);
+ else
+ gf_log (this->name, GF_LOG_TRACE,
+ "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => OK",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+ lock->client_pid,
+ lock->owner,
+ lock->fl_start,
+ lock->fl_end);
+
+ }
+ pthread_mutex_unlock (&pl_inode->mutex);
+ return ret;
+}