author | Ravishankar N <ravishankar@redhat.com> | 2016-01-11 12:58:16 +0000
---|---|---
committer | Pranith Kumar Karampuri <pkarampu@redhat.com> | 2016-01-18 16:23:15 -0800
commit | 11d91f4e5fb66596addd8906b2f65a4137bd580a |
tree | ba31089cce4963964c110c47bee0a03bf8afb10e /xlators/cluster |
parent | 50ae3e67e4f294925fc840d3f83b77f7072af54d |
afr: skip healing data blocks for arbiter
Backport of http://review.gluster.org/12777
1. ...but still do the other parts of data-self-heal, like restoring the time
and undoing pending xattrs.
2. Perform undo_pending inside inodelks.
3. If the arbiter is the only sink, do these other parts of data-self-heal
inside a single lock-unlock sequence.
Change-Id: I64c9d5b594375f852bfb73dee02c66a9a67a7176
BUG: 1286169
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: http://review.gluster.org/12778
Smoke: Gluster Build System <jenkins@build.gluster.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
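
To make point 1 concrete: the patch masks the arbiter brick out of the `healed_sinks` array around the data-block copy and puts it back afterwards, so restore-time and undo-pending still treat the arbiter as a sink. Below is a minimal, self-contained sketch of that save/zero/restore pattern; the types and helpers (`CHILD_COUNT`, `copy_blocks_to_sinks`, `finish_metadata_heal`) are simplified stand-ins, not the real AFR structures.

```c
#include <stdio.h>

#define CHILD_COUNT         3
#define ARBITER_BRICK_INDEX (CHILD_COUNT - 1)   /* arbiter is the last brick */

/* Stand-in for the block-copy phase: the arbiter must never receive data. */
static void
copy_blocks_to_sinks (unsigned char *healed_sinks)
{
        for (int i = 0; i < CHILD_COUNT; i++)
                if (healed_sinks[i])
                        printf ("copying data blocks to brick %d\n", i);
}

/* Stand-in for restoring times and undoing pending xattrs: these steps
 * must still cover the arbiter, which carries metadata but no data. */
static void
finish_metadata_heal (unsigned char *healed_sinks)
{
        for (int i = 0; i < CHILD_COUNT; i++)
                if (healed_sinks[i])
                        printf ("restore time / undo pending on brick %d\n", i);
}

int
main (void)
{
        /* bricks 1 and 2 are sinks; brick 2 is the arbiter */
        unsigned char healed_sinks[CHILD_COUNT] = {0, 1, 1};
        unsigned char arbiter_sink_status = 0;

        /* mask the arbiter out before the data phase ... */
        arbiter_sink_status = healed_sinks[ARBITER_BRICK_INDEX];
        healed_sinks[ARBITER_BRICK_INDEX] = 0;

        copy_blocks_to_sinks (healed_sinks);

        /* ... and restore it so the metadata phase still includes it */
        if (arbiter_sink_status)
                healed_sinks[ARBITER_BRICK_INDEX] = arbiter_sink_status;

        finish_metadata_heal (healed_sinks);
        return 0;
}
```

The same sequence appears twice in the diff below, in afr_selfheal_data_do() and __afr_selfheal_truncate_sinks().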
Diffstat (limited to 'xlators/cluster')
-rw-r--r-- | xlators/cluster/afr/src/afr-self-heal-data.c | 58 |
1 file changed, 49 insertions, 9 deletions
```diff
diff --git a/xlators/cluster/afr/src/afr-self-heal-data.c b/xlators/cluster/afr/src/afr-self-heal-data.c
index 200176e8d8d..429f3593bfc 100644
--- a/xlators/cluster/afr/src/afr-self-heal-data.c
+++ b/xlators/cluster/afr/src/afr-self-heal-data.c
@@ -371,15 +371,22 @@ afr_selfheal_data_do (call_frame_t *frame, xlator_t *this, fd_t *fd,
         int type = AFR_SELFHEAL_DATA_FULL;
         int ret = -1;
         call_frame_t *iter_frame = NULL;
+        unsigned char arbiter_sink_status = 0;
 
         priv = this->private;
+        if (priv->arbiter_count) {
+                arbiter_sink_status = healed_sinks[ARBITER_BRICK_INDEX];
+                healed_sinks[ARBITER_BRICK_INDEX] = 0;
+        }
 
         type = afr_data_self_heal_type_get (priv, healed_sinks, source,
                                             replies);
 
         iter_frame = afr_copy_frame (frame);
-        if (!iter_frame)
-                return -ENOMEM;
+        if (!iter_frame) {
+                ret = -ENOMEM;
+                goto out;
+        }
 
         for (off = 0; off < replies[source].poststat.ia_size; off += block) {
                 if (AFR_COUNT (healed_sinks, priv->child_count) == 0) {
@@ -400,12 +407,12 @@ afr_selfheal_data_do (call_frame_t *frame, xlator_t *this, fd_t *fd,
                 }
         }
 
-        afr_selfheal_data_restore_time (frame, this, fd->inode, source,
-                                        healed_sinks, replies);
-
         ret = afr_selfheal_data_fsync (frame, this, fd, healed_sinks);
 
 out:
+        if (arbiter_sink_status)
+                healed_sinks[ARBITER_BRICK_INDEX] = arbiter_sink_status;
+
         if (iter_frame)
                 AFR_STACK_DESTROY (iter_frame);
         return ret;
@@ -419,11 +426,17 @@ __afr_selfheal_truncate_sinks (call_frame_t *frame, xlator_t *this,
 {
         afr_local_t *local = NULL;
         afr_private_t *priv = NULL;
+        unsigned char arbiter_sink_status = 0;
         int i = 0;
 
         local = frame->local;
         priv = this->private;
+        if (priv->arbiter_count) {
+                arbiter_sink_status = healed_sinks[ARBITER_BRICK_INDEX];
+                healed_sinks[ARBITER_BRICK_INDEX] = 0;
+        }
+
         AFR_ONLIST (healed_sinks, frame, attr_cbk, ftruncate, fd, size,
                     NULL);
 
         for (i = 0; i < priv->child_count; i++)
@@ -432,6 +445,9 @@ __afr_selfheal_truncate_sinks (call_frame_t *frame, xlator_t *this,
                            as successfully healed. Mark it so.
                          */
                         healed_sinks[i] = 0;
+
+        if (arbiter_sink_status)
+                healed_sinks[ARBITER_BRICK_INDEX] = arbiter_sink_status;
 
         return 0;
 }
@@ -674,6 +690,7 @@ __afr_selfheal_data (call_frame_t *frame, xlator_t *this, fd_t *fd,
         struct afr_reply *locked_replies = NULL;
         int source = -1;
         gf_boolean_t did_sh = _gf_true;
+        gf_boolean_t is_arbiter_the_only_sink = _gf_false;
 
         priv = this->private;
 
@@ -719,6 +736,13 @@ __afr_selfheal_data (call_frame_t *frame, xlator_t *this, fd_t *fd,
                         goto unlock;
                 }
 
+                if (priv->arbiter_count &&
+                    AFR_COUNT (healed_sinks, priv->child_count) == 1 &&
+                    healed_sinks[ARBITER_BRICK_INDEX]) {
+                        is_arbiter_the_only_sink = _gf_true;
+                        goto restore_time;
+                }
+
                 ret = __afr_selfheal_truncate_sinks (frame, this, fd, healed_sinks,
                                                      locked_replies[source].poststat.ia_size);
                 if (ret < 0)
@@ -739,11 +763,27 @@ unlock:
         ret = afr_selfheal_data_do (frame, this, fd, source, healed_sinks,
                                     locked_replies);
         if (ret)
-                goto out;
+                goto out;
+restore_time:
+        afr_selfheal_data_restore_time (frame, this, fd->inode, source,
+                                        healed_sinks, locked_replies);
 
-        ret = afr_selfheal_undo_pending (frame, this, fd->inode, sources, sinks,
-                                         healed_sinks, AFR_DATA_TRANSACTION,
-                                         locked_replies, data_lock);
+        if (!is_arbiter_the_only_sink) {
+                ret = afr_selfheal_inodelk (frame, this, fd->inode, this->name,
+                                            0, 0, data_lock);
+                if (ret < AFR_SH_MIN_PARTICIPANTS) {
+                        ret = -ENOTCONN;
+                        did_sh = _gf_false;
+                        goto skip_undo_pending;
+                }
+        }
+        ret = afr_selfheal_undo_pending (frame, this, fd->inode,
+                                         sources, sinks, healed_sinks,
+                                         AFR_DATA_TRANSACTION,
+                                         locked_replies, data_lock);
+skip_undo_pending:
+        afr_selfheal_uninodelk (frame, this, fd->inode, this->name, 0, 0,
+                                data_lock);
 out:
         if (did_sh)
```
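
For points 2 and 3 of the commit message, the locking control flow is easier to see in isolation. The sketch below is a heavily simplified, hypothetical rendering of the new flow in __afr_selfheal_data(); the lock helpers, domain string, and MIN_PARTICIPANTS constant are placeholders, not the real AFR API. When the arbiter is the only sink, the data phase is skipped entirely and restore-time plus undo-pending run under the one lock already held; otherwise the lock is dropped for the data copy and reacquired just for undo_pending.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical placeholders for the real lock/heal helpers. */
static int  take_inodelk (const char *dom)    { printf ("lock   %s\n", dom); return 2; }
static void release_inodelk (const char *dom) { printf ("unlock %s\n", dom); }
static void heal_data_blocks (void)           { printf ("heal data blocks\n"); }
static void restore_time_on_sinks (void)      { printf ("restore time\n"); }
static void undo_pending (void)               { printf ("undo pending xattrs\n"); }

#define MIN_PARTICIPANTS 2   /* placeholder for AFR_SH_MIN_PARTICIPANTS */

static int
selfheal_data (bool arbiter_is_only_sink)
{
        int ret;

        /* the heal starts under an inodelk in both cases */
        ret = take_inodelk ("data-domain");
        if (ret < MIN_PARTICIPANTS) {
                release_inodelk ("data-domain");   /* partial locks still need cleanup */
                return -1;
        }

        if (arbiter_is_only_sink)
                goto restore_time;                 /* keep holding the lock */

        /* regular case: drop the lock for the long data-copy phase ... */
        release_inodelk ("data-domain");
        heal_data_blocks ();

restore_time:
        restore_time_on_sinks ();

        if (!arbiter_is_only_sink) {
                /* ... and reacquire it just for undo_pending */
                ret = take_inodelk ("data-domain");
                if (ret < MIN_PARTICIPANTS) {
                        ret = -1;
                        goto skip_undo_pending;
                }
        }
        undo_pending ();
        ret = 0;

skip_undo_pending:
        release_inodelk ("data-domain");
        return ret;
}

int
main (void)
{
        selfheal_data (true);    /* arbiter is the only sink: single lock-unlock */
        selfheal_data (false);   /* regular sinks: heal data, relock for undo    */
        return 0;
}
```

The actual patch additionally marks the heal as not done (did_sh = _gf_false) when the reacquired lock cannot be taken on enough bricks, which the sketch folds into the -1 return.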