| -rw-r--r-- | tests/basic/tier/new-tier-cmds.t | 22 |
| -rw-r--r-- | xlators/cluster/dht/src/dht-common.h | 13 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-messages.h | 10 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-mgmt.c | 2 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-tier.c | 224 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-tierd-svc.c | 65 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.c | 10 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.h | 3 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 17 |
9 files changed, 183 insertions(+), 183 deletions(-)
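Most of the change below moves the tierd start/stop decision out of the per-command helpers glusterd_tier_enable()/glusterd_tier_disable() (deleted from glusterd-tier.c) and into the tierd svc manager, which now asks glusterd_is_tierd_supposed_to_be_enabled() — a tier volume with no detach-tier operation in progress — and honours a "force" flag carried in volinfo->dict. The standalone C sketch below only models that decision; the struct, fields and helper names are simplified stand-ins for illustration, not the real glusterd types.

/* Minimal model of the tierd start/stop decision; hypothetical types. */
#include <stdbool.h>
#include <stdio.h>

enum tier_op { OP_NONE, OP_DETACH_TIER };

struct volinfo {
        bool is_tier_volume;   /* stands in for type == GF_CLUSTER_TYPE_TIER */
        bool started;          /* stands in for GLUSTERD_STATUS_STARTED      */
        enum tier_op tier_op;  /* stands in for volinfo->tier.op             */
        bool tierd_running;    /* stands in for gf_is_service_running()      */
};

/* mirrors glusterd_is_tierd_supposed_to_be_enabled() from the patch */
static bool
tierd_supposed_to_be_enabled (const struct volinfo *v)
{
        if (!v->is_tier_volume || v->tier_op == OP_DETACH_TIER)
                return false;
        return true;
}

/* mirrors the manager's branch: start tierd when it is supposed to be
 * enabled (or when forced), otherwise stop it if it is running */
static const char *
tierd_manager_action (const struct volinfo *v, bool force)
{
        if (!v->started)
                return v->tierd_running ? "stop tierd" : "nothing to do";
        if (force || tierd_supposed_to_be_enabled (v))
                return "start (or keep) tierd";
        return v->tierd_running ? "stop tierd" : "nothing to do";
}

int
main (void)
{
        struct volinfo v = { .is_tier_volume = true, .started = true,
                             .tier_op = OP_DETACH_TIER, .tierd_running = true };

        /* a detach-tier in progress means the manager stops tierd */
        printf ("%s\n", tierd_manager_action (&v, false));
        return 0;
}

This is also why the detach paths in glusterd_op_remove_tier_brick() set volinfo->tier.op before calling the manager: detach start flips it to GD_OP_DETACH_TIER so the manager stops tierd while the detach runs, and detach stop resets it to GD_OP_DETACH_NOT_STARTED before the call so tierd is brought back.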
diff --git a/tests/basic/tier/new-tier-cmds.t b/tests/basic/tier/new-tier-cmds.t
index d341e62dc51..2c48e027b1b 100644
--- a/tests/basic/tier/new-tier-cmds.t
+++ b/tests/basic/tier/new-tier-cmds.t
@@ -14,9 +14,9 @@ function check_peers {
 }
 
 function create_dist_tier_vol () {
-        TEST $CLI_1 volume create $V0 $H1:$B1/${V0} $H2:$B2/${V0} $H3:$B3/${V0}
+        TEST $CLI_1 volume create $V0 disperse 6 redundancy 2 $H1:$B1/${V0}_b1 $H2:$B2/${V0}_b2 $H3:$B3/${V0}_b3 $H1:$B1/${V0}_b4 $H2:$B2/${V0}_b5 $H3:$B3/${V0}_b6
         TEST $CLI_1 volume start $V0
-        TEST $CLI_1 volume tier $V0 attach $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3
+        TEST $CLI_1 volume tier $V0 attach replica 2 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3 $H1:$B1/${V0}_h4 $H2:$B2/${V0}_h5 $H3:$B3/${V0}_h6
 }
 
 function tier_daemon_status {
@@ -59,8 +59,19 @@ EXPECT "Tier command failed" $CLI_1 volume tier $V0 detach status
 
 EXPECT "0" detach_xml_status
 
-#after starting detach tier the detach tier status should display the status
+#kill a node
+TEST kill_node 2
+
+#check if we have the rest of the node available printed in the output of detach status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_status_node_down
+
+TEST $glusterd_2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
+
+#after starting detach tier the detach tier status should display the status
+sleep 2
+$CLI_1 volume status
 TEST $CLI_1 volume tier $V0 detach start
 
 EXPECT "1" detach_xml_status
 
@@ -73,14 +84,11 @@ TEST kill_node 2
 #check if we have the rest of the node available printed in the output of detach status
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status_node_down
 
-#check if we have the rest of the node available printed in the output of tier status
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_status_node_down
-
 TEST $glusterd_2;
 
 EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 
 # Make sure we check that the *bricks* are up and not just the node.  >:-(
-EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 brick_up_status_1 $V0 $H2 $B2/${V0}
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 brick_up_status_1 $V0 $H2 $B2/${V0}_b2
 EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 brick_up_status_1 $V0 $H2 $B2/${V0}_h2
 
 # Parsing normal output doesn't work because of line-wrap issues on our
diff --git a/xlators/cluster/dht/src/dht-common.h b/xlators/cluster/dht/src/dht-common.h
index 10c2e2089b8..d396c1d8173 100644
--- a/xlators/cluster/dht/src/dht-common.h
+++ b/xlators/cluster/dht/src/dht-common.h
@@ -405,10 +405,17 @@ enum gf_defrag_type {
         GF_DEFRAG_CMD_PAUSE_TIER = 1 + 9,
         GF_DEFRAG_CMD_RESUME_TIER = 1 + 10,
         GF_DEFRAG_CMD_DETACH_STATUS = 1 + 11,
-        GF_DEFRAG_CMD_DETACH_START = 1 + 12,
-        GF_DEFRAG_CMD_DETACH_STOP = 1 + 13,
+        GF_DEFRAG_CMD_STOP_TIER = 1 + 12,
+        GF_DEFRAG_CMD_DETACH_START = 1 + 13,
+        GF_DEFRAG_CMD_DETACH_COMMIT = 1 + 14,
+        GF_DEFRAG_CMD_DETACH_COMMIT_FORCE = 1 + 15,
+        GF_DEFRAG_CMD_DETACH_STOP = 1 + 16,
         /* new labels are used so it will help
-         * while removing old labels by easily differentiating
+         * while removing old labels by easily differentiating.
+         * A few labels are added so that the count remains same
+         * between this enum and the ones on the xdr file.
+         * different values for the same enum cause errors and
+         * confusion.
          */
 };
 typedef enum gf_defrag_type gf_defrag_type;
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
index cf84cd30f11..a112d72d822 100644
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
@@ -41,7 +41,7 @@
 
 #define GLUSTERD_COMP_BASE      GLFS_MSGID_GLUSTERD
 
-#define GLFS_NUM_MESSAGES       613
+#define GLFS_NUM_MESSAGES       614
 
 #define GLFS_MSGID_END          (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1)
 /* Messaged with message IDs */
@@ -4953,6 +4953,14 @@
  */
 #define GD_MSG_CHANGELOG_GET_FAIL (GLUSTERD_COMP_BASE + 613)
 
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_MANAGER_FUNCTION_FAILED  (GLUSTERD_COMP_BASE + 614)
+
 /*------------*/
 
 #define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 3fb90766082..a1596f0944d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -539,8 +539,6 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
                                 goto out;
                         }
 
-                        volinfo->is_tier_enabled = _gf_true;
-
                         if (ret) {
                                 gf_msg (this->name, GF_LOG_ERROR, errno,
                                         GD_MSG_DICT_SET_FAILED, "dict set "
diff --git a/xlators/mgmt/glusterd/src/glusterd-tier.c b/xlators/mgmt/glusterd/src/glusterd-tier.c
index f9a1d35803d..5df87fdfa33 100644
--- a/xlators/mgmt/glusterd/src/glusterd-tier.c
+++ b/xlators/mgmt/glusterd/src/glusterd-tier.c
@@ -244,116 +244,6 @@ glusterd_handle_tier (rpcsvc_request_t *req)
         return glusterd_big_locked_handler (req, __glusterd_handle_tier);
 }
 
-
-static int
-glusterd_manage_tier (glusterd_volinfo_t *volinfo, int opcode)
-{
-        int              ret   = -1;
-        xlator_t         *this = NULL;
-        glusterd_conf_t  *priv = NULL;
-
-        this = THIS;
-        GF_VALIDATE_OR_GOTO (THIS->name, this, out);
-        GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
-        priv = this->private;
-        GF_VALIDATE_OR_GOTO (this->name, priv, out);
-
-        switch (opcode) {
-        case GF_DEFRAG_CMD_START_TIER:
-        case GF_DEFRAG_CMD_STOP_TIER:
-                ret = volinfo->tierd.svc.manager (&(volinfo->tierd.svc),
-                                                volinfo, PROC_START_NO_WAIT);
-                break;
-        default:
-                ret = 0;
-                break;
-        }
-
-out:
-        return ret;
-
-}
-
-static int
-glusterd_tier_enable (glusterd_volinfo_t *volinfo, char **op_errstr)
-{
-        int32_t                 ret                     = -1;
-        xlator_t                *this                   = NULL;
-        int32_t                 tier_online             = -1;
-        char                    pidfile[PATH_MAX]       = {0};
-        int32_t                 pid                     = -1;
-        glusterd_conf_t         *priv                   = NULL;
-
-        this = THIS;
-
-        GF_VALIDATE_OR_GOTO (THIS->name, this, out);
-        GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
-        GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
-        priv = this->private;
-        GF_VALIDATE_OR_GOTO (this->name, priv, out);
-
-        if (glusterd_is_volume_started (volinfo) == 0) {
-                *op_errstr = gf_strdup ("Volume is stopped, start volume "
-                                        "to enable tier.");
-                ret = -1;
-                goto out;
-        }
-
-        GLUSTERD_GET_TIER_PID_FILE(pidfile, volinfo, priv);
-        tier_online = gf_is_service_running (pidfile, &pid);
-
-        if (tier_online) {
-                *op_errstr = gf_strdup ("tier is already enabled");
-                ret = -1;
-                goto out;
-        }
-
-        volinfo->is_tier_enabled = _gf_true;
-
-        ret = 0;
-out:
-        if (ret && op_errstr && !*op_errstr)
-                gf_asprintf (op_errstr, "Enabling tier on volume %s has been "
-                             "unsuccessful", volinfo->volname);
-        return ret;
-}
-
-static int
-glusterd_tier_disable (glusterd_volinfo_t *volinfo, char **op_errstr)
-{
-        int32_t                 ret                     = -1;
-        xlator_t                *this                   = NULL;
-        int32_t                 tier_online             = -1;
-        char                    pidfile[PATH_MAX]       = {0};
-        int32_t                 pid                     = -1;
-        glusterd_conf_t         *priv                   = NULL;
-
-        this = THIS;
-
-        GF_VALIDATE_OR_GOTO (THIS->name, this, out);
-        GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
-        GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
-        priv = this->private;
-
-        GLUSTERD_GET_TIER_PID_FILE(pidfile, volinfo, priv);
-        tier_online = gf_is_service_running (pidfile, &pid);
-
-        if (!tier_online) {
-                *op_errstr = gf_strdup ("tier is already disabled");
-                ret = -1;
-                goto out;
-        }
-
-        volinfo->is_tier_enabled = _gf_false;
-
-        ret = 0;
-out:
-        if (ret && op_errstr && !*op_errstr)
-                gf_asprintf (op_errstr, "Disabling tier volume %s has "
-                             "been unsuccessful", volinfo->volname);
-        return ret;
-}
-
 int
 glusterd_op_remove_tier_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
 {
@@ -455,6 +345,19 @@ glusterd_op_remove_tier_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
                                         continue;
                                 brickinfo->decommissioned = 0;
                         }
+                        volinfo->tier.op = GD_OP_DETACH_NOT_STARTED;
+                        ret = volinfo->tierd.svc.manager (&(volinfo->tierd.svc),
+                                                          volinfo,
+                                                          PROC_START_NO_WAIT);
+                        if (ret) {
+                                gf_msg (this->name, GF_LOG_ERROR, 0,
+                                        GD_MSG_MANAGER_FUNCTION_FAILED,
+                                        "Calling manager for tier "
+                                        "failed on volume: %s for "
+                                        "detach stop", volinfo->volname);
+                                goto out;
+                        }
+
                         ret = glusterd_create_volfiles_and_notify_services
                                 (volinfo);
 
@@ -473,22 +376,24 @@ glusterd_op_remove_tier_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
                                         "failed to store volinfo");
                                 goto out;
                         }
-                        ret = glusterd_tierdsvc_restart ();
-                        if (ret) {
-                                gf_msg (this->name, GF_LOG_ERROR, 0,
-                                        GD_MSG_TIERD_START_FAIL,
-                                        "Couldn't restart tierd for "
-                                        "vol: %s", volinfo->volname);
-                                goto out;
-                        }
-
-                        volinfo->tier.op = GD_OP_DETACH_NOT_STARTED;
                         ret = 0;
                         goto out;
 
         case GF_DEFRAG_CMD_DETACH_START:
+                        volinfo->tier.op = GD_OP_DETACH_TIER;
+                        svc = &(volinfo->tierd.svc);
+                        ret = svc->manager (svc, volinfo,
+                                        PROC_START_NO_WAIT);
+                        if (ret) {
+                                gf_msg (this->name, GF_LOG_ERROR, 0,
+                                        GD_MSG_MANAGER_FUNCTION_FAILED,
+                                        "calling manager for tier "
+                                        "failed on volume: %s for "
+                                        "detach start", volname);
+                                goto out;
+                        }
                         ret = dict_get_str (dict, GF_REMOVE_BRICK_TID_KEY,
                                             &task_id_str);
                         if (ret) {
@@ -510,8 +415,6 @@ glusterd_op_remove_tier_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
                         }
                         force = 0;
-                        volinfo->tier.op = GD_OP_DETACH_TIER;
-
                         volinfo->tier.defrag_status = GF_DEFRAG_STATUS_STARTED;
                         break;
 
         case GF_DEFRAG_CMD_DETACH_COMMIT:
@@ -529,6 +432,19 @@ glusterd_op_remove_tier_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
                         /* Fall through */
 
         case GF_DEFRAG_CMD_DETACH_COMMIT_FORCE:
+                        if (cmd == GF_DEFRAG_CMD_DETACH_COMMIT_FORCE) {
+                                svc = &(volinfo->tierd.svc);
+                                ret = svc->manager (svc, volinfo,
+                                                PROC_START_NO_WAIT);
+                                if (ret) {
+                                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                                GD_MSG_MANAGER_FUNCTION_FAILED,
+                                                "calling manager for tier "
+                                                "failed on volume: %s for "
+                                                "commit force", volname);
+                                        goto out;
+                                }
+                        }
                         glusterd_op_perform_detach_tier (volinfo);
                         detach_commit = 1;
 
@@ -707,11 +623,6 @@ glusterd_op_remove_tier_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
 
         if (cmd == GF_DEFRAG_CMD_DETACH_START &&
                         volinfo->status == GLUSTERD_STATUS_STARTED) {
-                svc = &(volinfo->tierd.svc);
-                ret = svc->reconfigure (volinfo);
-                if (ret)
-                        goto out;
-
                 ret = glusterd_svcs_reconfigure ();
                 if (ret) {
                         gf_msg (this->name, GF_LOG_WARNING, 0,
@@ -780,6 +691,7 @@ glusterd_op_tier_start_stop (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
         glusterd_conf_t         *priv                   = NULL;
         int32_t                 pid                     = -1;
         char                    pidfile[PATH_MAX]       = {0};
+        int                     is_force                = 0;
 
         this = THIS;
         GF_VALIDATE_OR_GOTO (THIS->name, this, out);
@@ -821,24 +733,48 @@ glusterd_op_tier_start_stop (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
         if (!retval)
                 goto out;
 
+        if (glusterd_is_volume_started (volinfo) == 0) {
+                *op_errstr = gf_strdup ("Volume is stopped, start "
+                                "volume to enable/disable tier.");
+                ret = -1;
+                goto out;
+        }
+
+        GLUSTERD_GET_TIER_PID_FILE(pidfile, volinfo, priv);
+
         switch (cmd) {
         case GF_DEFRAG_CMD_START_TIER:
-                GLUSTERD_GET_TIER_PID_FILE(pidfile, volinfo, priv);
                 /* we check if its running and skip so that we dont get a
                  * failure during force start
                  */
-                if (gf_is_service_running (pidfile, &pid))
-                        goto out;
-                ret = glusterd_tier_enable (volinfo, op_errstr);
-                if (ret < 0)
-                        goto out;
-                glusterd_store_perform_node_state_store (volinfo);
+                ret = dict_get_int32 (dict, "force", &is_force);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Unable to get is_force"
+                                        " from dict");
+                }
+                ret = dict_set_int32 (volinfo->dict, "force", is_force);
+                if (ret) {
+                        gf_msg_debug (this->name, errno, "Unable to set"
+                                        " is_force to dict");
+                }
+
+                if (!is_force) {
+                        if (gf_is_service_running (pidfile, &pid)) {
+                                gf_asprintf (op_errstr, "Tier is already "
+                                             "enabled on volume %s." ,
+                                             volinfo->volname);
+                                goto out;
+                        }
+                }
+
                 break;
 
         case GF_DEFRAG_CMD_STOP_TIER:
-                ret = glusterd_tier_disable (volinfo, op_errstr);
-                if (ret < 0)
+                if (!gf_is_service_running (pidfile, &pid)) {
+                        gf_asprintf (op_errstr, "Tier is alreaady disabled on "
+                                     "volume %s.", volinfo->volname);
                         goto out;
+                }
                 break;
 
         default:
                 gf_asprintf (op_errstr, "tier command failed. Invalid "
@@ -847,7 +783,8 @@ glusterd_op_tier_start_stop (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
                 goto out;
         }
 
-        ret = glusterd_manage_tier (volinfo, cmd);
+        ret = volinfo->tierd.svc.manager (&(volinfo->tierd.svc),
+                                          volinfo, PROC_START_NO_WAIT);
         if (ret)
                 goto out;
 
@@ -984,6 +921,19 @@ glusterd_op_stage_tier (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
                                 "start validate failed");
                         goto out;
                 }
+                if (volinfo->tier.op == GD_OP_DETACH_TIER) {
+                        snprintf (msg, sizeof (msg), "A detach tier task "
+                                  "exists for volume %s. Either commit it"
+                                  " or stop it before starting a new task.",
+                                  volinfo->volname);
+                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                GD_MSG_OLD_REMOVE_BRICK_EXISTS,
+                                "Earlier detach-tier"
+                                " task exists for volume %s.",
+                                volinfo->volname);
+                        ret = -1;
+                        goto out;
+                }
                 break;
 
         case GF_DEFRAG_CMD_STOP_TIER:
diff --git a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
index bb2cbff6e93..378ecdb7a0d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
@@ -72,6 +72,8 @@ glusterd_tierdsvc_init (void *data)
         notify = glusterd_svc_common_rpc_notify;
         glusterd_store_perform_node_state_store (volinfo);
 
+        volinfo->type = GF_CLUSTER_TYPE_TIER;
+
         glusterd_svc_build_tierd_rundir (volinfo, rundir, sizeof (rundir));
         glusterd_svc_create_rundir (rundir);
 
@@ -150,6 +152,7 @@ glusterd_tierdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
         int                 ret     = 0;
         xlator_t           *this    = THIS;
         glusterd_volinfo_t *volinfo = NULL;
+        int                 is_force = 0;
 
         volinfo = data;
         GF_VALIDATE_OR_GOTO (this->name, data, out);
@@ -169,25 +172,29 @@ glusterd_tierdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
                 }
         }
 
-        ret = glusterd_is_tierd_enabled (volinfo);
-        if (ret == -1) {
-                gf_msg (this->name, GF_LOG_ERROR, 0,
-                        GD_MSG_VOLINFO_GET_FAIL, "Failed to read volume "
-                        "options");
-                goto out;
+        ret = dict_get_int32 (volinfo->dict, "force", &is_force);
+        if (ret) {
+                gf_msg_debug (this->name, errno, "Unable to get"
+                              " is_force from dict");
         }
+        if (is_force)
+                ret = 1;
+        else
+                ret = (glusterd_is_tierd_supposed_to_be_enabled (volinfo));
+
         if (ret) {
                 if (!glusterd_is_volume_started (volinfo)) {
                         if (glusterd_proc_is_running (&svc->proc)) {
                                 ret = svc->stop (svc, SIGTERM);
                                 if (ret)
                                         gf_msg (this->name, GF_LOG_ERROR, 0,
-                                                GD_MSG_TIERD_STOP_FAIL,
+                                                GD_MSG_SNAPD_STOP_FAIL,
                                                 "Couldn't stop tierd for "
                                                 "volume: %s",
                                                 volinfo->volname);
                         } else {
+                                /* Since tierd is not running set ret to 0 */
                                 ret = 0;
                         }
                         goto out;
@@ -209,6 +216,7 @@ glusterd_tierdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
                                 "tierd for volume: %s", volinfo->volname);
                         goto out;
                 }
+                volinfo->is_tier_enabled = _gf_true;
 
                 glusterd_volinfo_ref (volinfo);
                 ret = glusterd_conn_connect (&(svc->conn));
@@ -216,16 +224,19 @@ glusterd_tierdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
                         glusterd_volinfo_unref (volinfo);
                         goto out;
                 }
-
-        } else if (glusterd_proc_is_running (&svc->proc)) {
-                ret = svc->stop (svc, SIGTERM);
-                if (ret) {
-                        gf_msg (this->name, GF_LOG_ERROR, 0,
-                                GD_MSG_TIERD_STOP_FAIL,
-                                "Couldn't stop tierd for volume: %s",
-                                volinfo->volname);
-                        goto out;
+        } else {
+                if (glusterd_proc_is_running (&svc->proc)) {
+                        ret = svc->stop (svc, SIGTERM);
+                        if (ret) {
+                                gf_msg (this->name, GF_LOG_ERROR, 0,
+                                        GD_MSG_TIERD_STOP_FAIL,
+                                        "Couldn't stop tierd for volume: %s",
+                                        volinfo->volname);
+                                goto out;
+                        }
+                        volinfo->is_tier_enabled = _gf_false;
                 }
+                ret = 0;
         }
 
 out:
@@ -362,7 +373,6 @@ out:
         return ret;
 }
 
-
 int
 glusterd_tierdsvc_restart ()
 {
@@ -380,15 +390,18 @@ glusterd_tierdsvc_restart ()
         cds_list_for_each_entry (volinfo, &conf->volumes, vol_list) {
                 /* Start per volume tierd svc */
                 if (volinfo->status == GLUSTERD_STATUS_STARTED &&
-                    glusterd_is_tierd_enabled (volinfo)) {
+                    volinfo->type == GF_CLUSTER_TYPE_TIER) {
                         svc = &(volinfo->tierd.svc);
-                        ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
-                        if (ret) {
-                                gf_msg (this->name, GF_LOG_ERROR, 0,
-                                        GD_MSG_TIERD_START_FAIL,
-                                        "Couldn't restart tierd for "
-                                        "vol: %s", volinfo->volname);
-                                goto out;
+                           if (volinfo->tier.op != GD_OP_DETACH_TIER) {
+                                ret = svc->manager (svc, volinfo,
+                                                PROC_START_NO_WAIT);
+                                if (ret) {
+                                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                                GD_MSG_TIERD_START_FAIL,
+                                                "Couldn't restart tierd for "
+                                                "vol: %s", volinfo->volname);
+                                        goto out;
+                                }
                         }
                 }
         }
@@ -419,7 +432,7 @@ glusterd_tierdsvc_reconfigure (void *data)
         this = THIS;
         GF_VALIDATE_OR_GOTO (THIS->name, this, out);
 
-        if (glusterd_is_tierd_enabled (volinfo))
+        if (!glusterd_is_tierd_enabled (volinfo))
                 goto manager;
         /*
          * Check both OLD and NEW volfiles, if they are SAME by size
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index fe96d6be094..066d2f72f81 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -12174,6 +12174,16 @@ glusterd_is_volume_inode_quota_enabled (glusterd_volinfo_t *volinfo)
 }
 
 int
+glusterd_is_tierd_supposed_to_be_enabled (glusterd_volinfo_t *volinfo)
+{
+        if ((volinfo->type != GF_CLUSTER_TYPE_TIER) ||
+                        (volinfo->tier.op == GD_OP_DETACH_TIER))
+                return _gf_false;
+        else
+                return _gf_true;
+}
+
+int
 glusterd_is_tierd_enabled (glusterd_volinfo_t *volinfo)
 {
         return volinfo->is_tier_enabled;
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index e76ee63edf1..6111ea1100f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -628,6 +628,9 @@ int
 glusterd_is_tierd_enabled (glusterd_volinfo_t *volinfo);
 
 int
+glusterd_is_tierd_supposed_to_be_enabled (glusterd_volinfo_t *volinfo);
+
+int
 glusterd_is_volume_quota_enabled (glusterd_volinfo_t *volinfo);
 
 int
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 0db91a30fff..a87dfc39eb7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -2729,6 +2729,16 @@ glusterd_stop_volume (glusterd_volinfo_t *volinfo)
                 }
         }
 
+        /* call tier manager before the voluem status is set as stopped
+         * as tier uses that as a check in the manager
+         * */
+        if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+                svc = &(volinfo->tierd.svc);
+                ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
+                if (ret)
+                        goto out;
+        }
+
         glusterd_set_volume_status (volinfo, GLUSTERD_STATUS_STOPPED);
 
         ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
@@ -2746,13 +2756,6 @@ glusterd_stop_volume (glusterd_volinfo_t *volinfo)
                         goto out;
         }
 
-        if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
-                svc = &(volinfo->tierd.svc);
-                ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
-                if (ret)
-                        goto out;
-        }
-
         ret = glusterd_svcs_manager (volinfo);
         if (ret) {
                 gf_msg (this->name, GF_LOG_ERROR, 0,

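The comment added to gf_defrag_type in dht-common.h notes that padding the enum keeps its values in step with the matching list in the xdr file, because different values for the same label cause errors and confusion. Purely as an illustration of that concern, the hypothetical sketch below shows one way such drift can be caught at compile time; the XDR_CMD_*/LOCAL_CMD_* names are made up, and the values simply mirror the 1 + 11 through 1 + 13 assignments in the hunk above — this check is not something the patch itself adds.

/* Hypothetical compile-time guard against two copies of a command enum
 * drifting apart (names and check are illustrative only). */
#include <assert.h>

/* stand-in for the XDR-generated command list */
enum xdr_defrag_cmd {
        XDR_CMD_DETACH_STATUS = 12,
        XDR_CMD_STOP_TIER     = 13,
        XDR_CMD_DETACH_START  = 14,
};

/* stand-in for the copy kept in dht-common.h */
enum local_defrag_cmd {
        LOCAL_CMD_DETACH_STATUS = 1 + 11,
        LOCAL_CMD_STOP_TIER     = 1 + 12,
        LOCAL_CMD_DETACH_START  = 1 + 13,
};

/* if the two lists ever disagree, the build fails here instead of a CLI
 * command being decoded as a different command at runtime */
static_assert (XDR_CMD_DETACH_STATUS == LOCAL_CMD_DETACH_STATUS,
               "defrag command enums out of sync");
static_assert (XDR_CMD_STOP_TIER == LOCAL_CMD_STOP_TIER,
               "defrag command enums out of sync");
static_assert (XDR_CMD_DETACH_START == LOCAL_CMD_DETACH_START,
               "defrag command enums out of sync");

int
main (void)
{
        return 0;
}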