summaryrefslogtreecommitdiffstats
path: root/xlators/features/locks/src/locks.h
diff options
context:
space:
mode:
authorSusant Palai <spalai@redhat.com>2018-11-30 15:04:17 +0530
committerAmar Tumballi <amarts@redhat.com>2019-01-17 15:33:42 +0000
commitebaf09a2a329517936232510e117debc3795e80b (patch)
treea3ce4c023b7ffce538ac31e3b3829c2267bbc2d0 /xlators/features/locks/src/locks.h
parent11cf73bc4173c13a9de54ea8d816eb72d8b01f48 (diff)
lock: Add fencing support
design reference: https://review.gluster.org/#/c/glusterfs-specs/+/21925/ This patch adds the lock preempt support. Note: The current model stores lock enforcement information as a separate xattr on disk. There is another effort going on in parallel to store this in the stat(x) of the file. This patch is self-sufficient to add fencing support. Based on the availability of the stat(x) support, either I will rebase this patch or we can modify the necessary bits post merging this patch. Change-Id: If4a42f3e0afaee1f66cdb0360ad4e0c005b5b017 updates: #466 Signed-off-by: Susant Palai <spalai@redhat.com>
Diffstat (limited to 'xlators/features/locks/src/locks.h')
-rw-r--r--xlators/features/locks/src/locks.h22
1 files changed, 22 insertions, 0 deletions
diff --git a/xlators/features/locks/src/locks.h b/xlators/features/locks/src/locks.h
index 34776e1..842c441 100644
--- a/xlators/features/locks/src/locks.h
+++ b/xlators/features/locks/src/locks.h
@@ -179,6 +179,24 @@ struct __pl_inode {
of inode_t as long as there are
locks on it */
gf_boolean_t migrated;
+
+ /* Flag to indicate whether to read mlock-enforce xattr from disk */
+ gf_boolean_t check_mlock_info;
+
+ /* Mandatory-lock enforcement: IO will be allowed if and only if the
+ lk-owner holds the lock.
+
+ Note: An xattr is set on the file so that this information can be
+ recovered after a reboot. If the client does not want the mandatory
+ lock to be enforced, it should remove this xattr explicitly
+ */
+ gf_boolean_t mlock_enforced;
+ /* There are scenarios where a mandatory lock has been granted but IOs are
+ still pending at the posix level. To avoid this, before preempting the
+ previous lock owner we wait for all the fops to be unwound.
+ */
+ int fop_wind_count;
+ pthread_cond_t check_fop_wind_count;
};
typedef struct __pl_inode pl_inode_t;
@@ -213,12 +231,14 @@ typedef struct {
dict_t *xdata;
loc_t loc[2];
fd_t *fd;
+ inode_t *inode;
off_t offset;
glusterfs_fop_t op;
gf_boolean_t entrylk_count_req;
gf_boolean_t inodelk_count_req;
gf_boolean_t posixlk_count_req;
gf_boolean_t parent_entrylk_req;
+ int update_mlock_enforced_flag;
} pl_local_t;
typedef struct {
@@ -239,6 +259,8 @@ typedef struct _locks_ctx {
struct list_head metalk_list;
} pl_ctx_t;
+typedef enum { DECREMENT, INCREMENT } pl_count_op_t;
+
pl_ctx_t *
pl_ctx_get(client_t *client, xlator_t *xlator);