diff options (diffstat)

 -rw-r--r--  tests/basic/afr/heal-info.t                      | 36
 -rw-r--r--  xlators/cluster/afr/src/afr-common.c             | 32
 -rw-r--r--  xlators/cluster/afr/src/afr-self-heal-entry.c    | 17
 -rw-r--r--  xlators/cluster/afr/src/afr.c                    | 14
 -rw-r--r--  xlators/cluster/afr/src/afr.h                    |  1
 -rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-set.c  |  7

 6 files changed, 90 insertions(+), 17 deletions(-)
diff --git a/tests/basic/afr/heal-info.t b/tests/basic/afr/heal-info.t new file mode 100644 index 00000000000..b4da50b3ae9 --- /dev/null +++ b/tests/basic/afr/heal-info.t @@ -0,0 +1,36 @@ +#!/bin/bash +#Test that parallel heal-info command execution doesn't result in spurious +#entries with locking-scheme granular + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +function heal_info_to_file { +        while [ -f $M0/a.txt ]; do +                $CLI volume heal $V0 info | grep -i number | grep -v 0 >> $1 +        done +} + +function write_and_del_file { +        dd of=$M0/a.txt if=/dev/zero bs=1024k count=100 +        rm -f $M0/a.txt +} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1} +TEST $CLI volume set $V0 locking-scheme granular +TEST $CLI volume start $V0 +TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0; +TEST touch $M0/a.txt +write_and_del_file & +touch $B0/f1 $B0/f2 +heal_info_to_file $B0/f1 & +heal_info_to_file $B0/f2 & +wait +EXPECT "^0$" echo $(wc -l $B0/f1 | awk '{print $1}') +EXPECT "^0$" echo $(wc -l $B0/f2 | awk '{print $1}') + +cleanup; diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c index f4d74c57a5d..32dc91ca16e 100644 --- a/xlators/cluster/afr/src/afr-common.c +++ b/xlators/cluster/afr/src/afr-common.c @@ -4549,8 +4549,11 @@ afr_selfheal_locked_data_inspect (call_frame_t *frame, xlator_t *this,          afr_private_t   *priv = NULL;          fd_t          *fd = NULL;          struct afr_reply *locked_replies = NULL; +        gf_boolean_t granular_locks = _gf_false;          priv = this->private; +	if (strcmp ("granular", priv->locking_scheme) == 0) +	        granular_locks = _gf_true;          locked_on = alloca0 (priv->child_count);          data_lock = alloca0 (priv->child_count);          sources = alloca0 (priv->child_count); @@ -4571,10 +4574,12 @@ afr_selfheal_locked_data_inspect (call_frame_t *frame, xlator_t *this,  
        locked_replies = alloca0 (sizeof (*locked_replies) * priv->child_count); -        ret = afr_selfheal_tryinodelk (frame, this, inode, priv->sh_domain, -                                       0, 0, locked_on); +        if (!granular_locks) { +                ret = afr_selfheal_tryinodelk (frame, this, inode, +                                              priv->sh_domain, 0, 0, locked_on); +        }          { -                if (ret == 0) { +                if (!granular_locks && (ret == 0)) {                          ret = -afr_final_errno (frame->local, priv);                          if (ret == 0)                                  ret = -ENOTCONN;/* all invalid responses */ @@ -4601,8 +4606,9 @@ afr_selfheal_locked_data_inspect (call_frame_t *frame, xlator_t *this,                                          data_lock);          }  unlock: -        afr_selfheal_uninodelk (frame, this, inode, priv->sh_domain, 0, 0, -                                locked_on); +        if (!granular_locks) +                afr_selfheal_uninodelk (frame, this, inode, priv->sh_domain, 0, +                                        0, locked_on);  out:          if (locked_replies)                  afr_replies_wipe (locked_replies, priv->child_count); @@ -4625,8 +4631,11 @@ afr_selfheal_locked_entry_inspect (call_frame_t *frame, xlator_t *this,          unsigned char *sinks = NULL;          unsigned char *healed_sinks = NULL;          struct afr_reply *locked_replies = NULL; +        gf_boolean_t granular_locks = _gf_false;          priv = this->private; +	if (strcmp ("granular", priv->locking_scheme) == 0) +	        granular_locks = _gf_true;          locked_on = alloca0 (priv->child_count);          data_lock = alloca0 (priv->child_count);          sources = alloca0 (priv->child_count); @@ -4635,10 +4644,12 @@ afr_selfheal_locked_entry_inspect (call_frame_t *frame, xlator_t *this,          locked_replies = alloca0 (sizeof (*locked_replies) * priv->child_count); -        ret = 
afr_selfheal_tryentrylk (frame, this, inode, priv->sh_domain, -                                       NULL, locked_on); +        if (!granular_locks) { +                ret = afr_selfheal_tryentrylk (frame, this, inode, +                                              priv->sh_domain, NULL, locked_on); +        }          { -                if (ret == 0) { +                if (!granular_locks && ret == 0) {                          ret = -afr_final_errno (frame->local, priv);                          if (ret == 0)                                  ret = -ENOTCONN;/* all invalid responses */ @@ -4668,8 +4679,9 @@ afr_selfheal_locked_entry_inspect (call_frame_t *frame, xlator_t *this,                                          data_lock);          }  unlock: -        afr_selfheal_unentrylk (frame, this, inode, priv->sh_domain, NULL, -                                locked_on); +        if (!granular_locks) +                afr_selfheal_unentrylk (frame, this, inode, priv->sh_domain, +                                        NULL, locked_on);  out:          if (locked_replies)                  afr_replies_wipe (locked_replies, priv->child_count); diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c index c8e2c98db0e..fccffa7dbac 100644 --- a/xlators/cluster/afr/src/afr-self-heal-entry.c +++ b/xlators/cluster/afr/src/afr-self-heal-entry.c @@ -744,8 +744,11 @@ afr_selfheal_entry (call_frame_t *frame, xlator_t *this, inode_t *inode)          unsigned char *long_name_locked = NULL;  	fd_t *fd = NULL;  	int ret = 0; +	gf_boolean_t granular_locks = _gf_false;  	priv = this->private; +	if (strcmp ("granular", priv->locking_scheme) == 0) +	        granular_locks = _gf_true;  	fd = afr_selfheal_data_opendir (this, inode);  	if (!fd) @@ -772,10 +775,13 @@ afr_selfheal_entry (call_frame_t *frame, xlator_t *this, inode_t *inode)  			goto unlock;  		} -                ret = afr_selfheal_tryentrylk (frame, this, inode, this->name, -   
                                            LONG_FILENAME, long_name_locked); +                if (!granular_locks) { +                        ret = afr_selfheal_tryentrylk (frame, this, inode, +                                                      this->name, LONG_FILENAME, +                                                      long_name_locked); +                }                  { -                        if (ret < 1) { +                        if (!granular_locks && ret < 1) {                                  gf_msg_debug (this->name, 0, "%s: Skipping"                                                " entry self-heal as only %d "                                                "sub-volumes could be " @@ -788,8 +794,9 @@ afr_selfheal_entry (call_frame_t *frame, xlator_t *this, inode_t *inode)                          }                          ret = __afr_selfheal_entry (frame, this, fd, locked_on);                  } -                afr_selfheal_unentrylk (frame, this, inode, this->name, -                                        LONG_FILENAME, long_name_locked); +                if (!granular_locks) +                        afr_selfheal_unentrylk (frame, this, inode, this->name, +                                               LONG_FILENAME, long_name_locked);  	}  unlock:  	afr_selfheal_unentrylk (frame, this, inode, priv->sh_domain, NULL, locked_on); diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c index 6e59fd46328..1b172d50e60 100644 --- a/xlators/cluster/afr/src/afr.c +++ b/xlators/cluster/afr/src/afr.c @@ -182,7 +182,10 @@ reconfigure (xlator_t *this, dict_t *options)                  priv->read_child = index;          } -        GF_OPTION_RECONF ("pre-op-compat", priv->pre_op_compat, options, bool, out); +        GF_OPTION_RECONF ("pre-op-compat", priv->pre_op_compat, options, bool, +                          out); +        GF_OPTION_RECONF ("locking-scheme", priv->locking_scheme, options, str, +                          out);     
     GF_OPTION_RECONF ("eager-lock", priv->eager_lock, options, bool, out);          GF_OPTION_RECONF ("quorum-type", qtype, options, str, out); @@ -375,6 +378,7 @@ init (xlator_t *this)          GF_OPTION_INIT ("entrylk-trace", priv->entrylk_trace, bool, out);          GF_OPTION_INIT ("pre-op-compat", priv->pre_op_compat, bool, out); +        GF_OPTION_INIT ("locking-scheme", priv->locking_scheme, str, out);          GF_OPTION_INIT ("eager-lock", priv->eager_lock, bool, out);          GF_OPTION_INIT ("quorum-type", qtype, str, out); @@ -879,5 +883,13 @@ struct volume_options options[] = {             .description = "This option can be used to control number of heals"                            " that can wait in SHD per subvolume",          }, +        { .key = {"locking-scheme"}, +          .type = GF_OPTION_TYPE_STR, +          .value = { "full", "granular"}, +          .default_value = "full", +          .description = "If this option is set to granular, self-heal will " +                         "stop being compatible with afr-v1, which helps afr " +                         "be more granular while self-healing", +        },          { .key  = {NULL} },  }; diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h index 7a99e70c80e..ef0d9f74e6e 100644 --- a/xlators/cluster/afr/src/afr.h +++ b/xlators/cluster/afr/src/afr.h @@ -139,6 +139,7 @@ typedef struct _afr_private {  	/* pump dependencies */  	void                   *pump_private;  	gf_boolean_t           use_afr_in_pump; +	char                   *locking_scheme;  } afr_private_t; diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c index 1fea1442cc5..cb7d5a59d4e 100644 --- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c +++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c @@ -2777,7 +2777,12 @@ struct volopt_map_entry glusterd_volopt_map[] = {            .op_version = GD_OP_VERSION_3_7_12,            .flags      = 
OPT_FLAG_CLIENT_OPT          }, - +        { .key        = "cluster.locking-scheme", +          .voltype    = "cluster/replicate", +          .type       = DOC, +          .op_version = GD_OP_VERSION_3_7_12, +          .flags      = OPT_FLAG_CLIENT_OPT +        },          { .key         = NULL          }  };
