path: root/xlators/features/bit-rot/src/bitd/bit-rot.c
author    Venky Shankar <vshankar@redhat.com>  2015-05-21 19:55:02 +0530
committer Vijay Bellur <vbellur@redhat.com>    2015-05-28 04:19:27 -0700
commit    6fc12a43f3e20ea24969e5a3c949f7c27d9c1893 (patch)
tree      08d2525bb859464ae7de2c534363f76db878f144 /xlators/features/bit-rot/src/bitd/bit-rot.c
parent    e878b0bcbaa19e46517e44284685ef99b885117b (diff)
features/bitrot: reimplement scrubbing frequency
This patch reimplements the existing scrub-frequency mechanism used to schedule scrubber runs. The existing mechanism uses periodic sleeps (waking up periodically on minimum granularity) and performs a number of tracking checks based on counters and sleep times. This patch does away with all the nifty counters and uses the timer-wheel to schedule scrub runs. Scheduling changes are performed by merely calculating the new expiry time and calling mod_timer() [mod_timer_pending() in some cases], making the code more debuggable and easier to follow.

This also introduces an "hourly" scrubbing tunable as an aid for testing scrubbing during the development/testing cycle. One could also implement on-demand scrubbing with ease: by invoking mod_timer() with an expiry of one (1) second, thereby scheduling a scrub run the very next second.

Change-Id: I6c7c5f0c6c9f886bf574d88c04cde14b76e60a8b
BUG: 1224596
Signed-off-by: Venky Shankar <vshankar@redhat.com>
Reviewed-on: http://review.gluster.org/10893
Reviewed-by: Gaurav Kumar Garg <ggarg@redhat.com>
Tested-by: NetBSD Build System
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
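To make the scheduling idea above concrete, here is a minimal, self-contained C sketch (not GlusterFS code). The scrub_timer struct and the mod_timer()/mod_timer_pending()/scrub_interval() helpers are hypothetical stand-ins for the kernel-style timer-wheel API the commit message refers to; the sketch only illustrates how recomputing an expiry and re-arming a timer replaces the old counter-and-sleep bookkeeping.

/*
 * Illustrative sketch only: rescheduling a scrub run by recomputing the
 * expiry and re-arming a timer, as described in the commit message.
 * All names below are stand-ins, not the actual GlusterFS/timer-wheel API.
 */
#include <stdio.h>
#include <time.h>

enum scrub_freq { SCRUB_HOURLY = 1, SCRUB_DAILY, SCRUB_WEEKLY };

struct scrub_timer {
        time_t expires;         /* absolute expiry of the next scrub run */
        int    pending;         /* non-zero if the timer is already armed */
};

/* Stand-in for mod_timer(): (re)arm the timer with a new expiry. */
static void mod_timer (struct scrub_timer *t, time_t expires)
{
        t->expires = expires;
        t->pending = 1;
}

/* Stand-in for mod_timer_pending(): only touch an already-armed timer. */
static int mod_timer_pending (struct scrub_timer *t, time_t expires)
{
        if (!t->pending)
                return 0;       /* nothing armed, leave it alone */
        t->expires = expires;
        return 1;
}

/* Translate the tunable into seconds; "hourly" exists mainly as a testing aid. */
static time_t scrub_interval (enum scrub_freq freq)
{
        switch (freq) {
        case SCRUB_HOURLY: return 60 * 60;
        case SCRUB_DAILY:  return 24 * 60 * 60;
        default:           return 7 * 24 * 60 * 60;
        }
}

int main (void)
{
        struct scrub_timer timer = {0};
        time_t now = time (NULL);

        /* Initial schedule: arm the timer one interval from now. */
        mod_timer (&timer, now + scrub_interval (SCRUB_DAILY));
        printf ("next scrub at %s", ctime (&timer.expires));

        /* Frequency changed via reconfigure(): recompute the expiry and
         * re-arm -- no sleep/counter bookkeeping needed. */
        mod_timer_pending (&timer, now + scrub_interval (SCRUB_HOURLY));
        printf ("rescheduled to %s", ctime (&timer.expires));

        /* On-demand scrubbing would simply be mod_timer(&timer, now + 1). */
        return 0;
}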
Diffstat (limited to 'xlators/features/bit-rot/src/bitd/bit-rot.c')
-rw-r--r--   xlators/features/bit-rot/src/bitd/bit-rot.c   70
1 file changed, 61 insertions(+), 9 deletions(-)
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot.c b/xlators/features/bit-rot/src/bitd/bit-rot.c
index 2652f02b4ea..e7cfe89e1dd 100644
--- a/xlators/features/bit-rot/src/bitd/bit-rot.c
+++ b/xlators/features/bit-rot/src/bitd/bit-rot.c
@@ -1135,6 +1135,11 @@ br_enact_scrubber (xlator_t *this, br_child_t *child)
INIT_LIST_HEAD (&fsscan->queued);
INIT_LIST_HEAD (&fsscan->ready);
+ /* init scheduler related variables */
+ fsscan->kick = _gf_false;
+ pthread_mutex_init (&fsscan->wakelock, NULL);
+ pthread_cond_init (&fsscan->wakecond, NULL);
+
ret = gf_thread_create (&child->thread, NULL, br_fsscanner, child);
if (ret != 0) {
gf_log (this->name, GF_LOG_ALERT, "failed to spawn bitrot "
@@ -1142,6 +1147,10 @@ br_enact_scrubber (xlator_t *this, br_child_t *child)
goto error_return;
}
+ ret = br_fsscan_schedule (this, child, fsscan, fsscrub);
+ if (ret)
+ goto error_return;
+
/**
* Everything has been setup.. add this subvolume to scrubbers
* list.
@@ -1407,13 +1416,6 @@ br_init_signer (xlator_t *this, br_private_t *priv)
if (ret)
goto out;
- priv->timer_wheel = glusterfs_global_timer_wheel (this);
- if (!priv->timer_wheel) {
- gf_log (this->name, GF_LOG_ERROR,
- "global timer wheel unavailable");
- goto out;
- }
-
pthread_cond_init (&priv->object_cond, NULL);
priv->obj_queue = GF_CALLOC (1, sizeof (*priv->obj_queue),
@@ -1568,6 +1570,13 @@ init (xlator_t *this)
INIT_LIST_HEAD (&priv->children[i].list);
INIT_LIST_HEAD (&priv->bricks);
+ priv->timer_wheel = glusterfs_global_timer_wheel (this);
+ if (!priv->timer_wheel) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "global timer wheel unavailable");
+ goto cleanup_mutex;
+ }
+
this->private = priv;
if (!priv->iamscrubber) {
@@ -1633,12 +1642,55 @@ fini (xlator_t *this)
int
reconfigure (xlator_t *this, dict_t *options)
{
- br_private_t *priv = this->private;
+ int i = 0;
+ int32_t ret = -1;
+ br_child_t *child = NULL;
+ br_private_t *priv = NULL;
+ struct br_scanfs *fsscan = NULL;
+ struct br_scrubber *fsscrub = NULL;
+
+ priv = this->private;
if (!priv->iamscrubber)
return 0;
- return br_scrubber_handle_options (this, priv, options);
+ ret = br_scrubber_handle_options (this, priv, options);
+ if (ret)
+ goto err;
+
+ fsscrub = &priv->fsscrub;
+
+ /* reschedule all _up_ subvolume(s) */
+ pthread_mutex_lock (&priv->lock);
+ {
+ for (; i < priv->child_count; i++) {
+ child = &priv->children[i];
+ if (!child->child_up) {
+ gf_log (this->name, GF_LOG_INFO,
+ "Brick %s is offline, skipping "
+ "rescheduling (scrub would auto- "
+ "schedule when brick is back online).",
+ child->brick_path);
+ continue;
+ }
+
+ fsscan = &child->fsscan;
+ ret = br_fsscan_reschedule (this, child,
+ fsscan, fsscrub, _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not "
+ "reschedule scrubber for brick: %s. "
+ "Scubbing will continue according to "
+ "old frequency.", child->brick_path);
+ }
+ }
+ }
+ pthread_mutex_unlock (&priv->lock);
+
+ return 0;
+
+ err:
+ return -1;
}
struct xlator_fops fops;