| field | value | date |
|---|---|---|
| author | Ashish Pandey <aspandey@redhat.com> | 2015-05-13 14:48:42 +0530 |
| committer | Pranith Kumar Karampuri <pkarampu@redhat.com> | 2015-08-25 10:48:24 -0700 |
| commit | 116e3dd3d7687a785b0d04a8afd11619d85ff4ec (patch) | |
| tree | 294205137003ef980d079c23ef48478f178cfe24 | |
| parent | 36349fa250ace6109002dfa41305d9dcd54ce0a9 (diff) | |
glusterd: Display status of Self Heal Daemon for disperse volumes

Problem: The status of the Self Heal Daemon is not
displayed in "gluster volume status" for disperse volumes.

Fix: Since disperse volumes are self-heal compatible,
show the status of the Self Heal Daemon in the
"gluster volume status" output for them as well.
Change-Id: I83d3e6a2fd122b171f15cfd76ce8e6b6e00f92e2
BUG: 1217311
Signed-off-by: Ashish Pandey <aspandey@redhat.com>
Reviewed-on: http://review.gluster.org/10764
Reviewed-by: Xavier Hernandez <xhernandez@datalab.es>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-op-sm.c | 38 |

1 file changed, 22 insertions(+), 16 deletions(-)
```diff
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 0e10b985aa6..73121250f1e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -35,6 +35,7 @@
 #include "glusterd-volgen.h"
 #include "glusterd-locks.h"
 #include "glusterd-messages.h"
+#include "glusterd-utils.h"
 #include "syscall.h"
 #include "cli1-xdr.h"
 #include "common-utils.h"
@@ -46,7 +47,7 @@
 #include "glusterd-nfs-svc.h"
 #include "glusterd-quotad-svc.h"
 #include "glusterd-server-quorum.h"
-
+#include "glusterd-volgen.h"
 #include <sys/types.h>
 #include <signal.h>
 #include <sys/wait.h>
@@ -1537,13 +1538,14 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
         char                   msg[2048]      = {0,};
         char                  *volname        = NULL;
         char                  *brick          = NULL;
+        char                  *shd_key        = NULL;
         xlator_t              *this           = NULL;
         glusterd_conf_t       *priv           = NULL;
         glusterd_brickinfo_t  *brickinfo      = NULL;
         glusterd_volinfo_t    *volinfo        = NULL;
         dict_t                *vol_opts       = NULL;
         gf_boolean_t           nfs_disabled   = _gf_false;
-        gf_boolean_t           shd_enabled    = _gf_true;
+        gf_boolean_t           shd_enabled    = _gf_false;
 
         GF_ASSERT (dict);
         this = THIS;
@@ -1616,17 +1618,18 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
                         goto out;
                 }
         } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
-                if (!glusterd_is_volume_replicate (volinfo)) {
+                if (glusterd_is_shd_compatible_volume (volinfo)) {
+                        shd_key = volgen_get_shd_key(volinfo);
+                        shd_enabled = dict_get_str_boolean (vol_opts,
+                                                     shd_key,
+                                                     _gf_true);
+                } else {
                         ret = -1;
                         snprintf (msg, sizeof (msg),
-                                  "Volume %s is not of type replicate",
-                                  volname);
+                              "Volume %s is not Self-heal compatible",
+                              volname);
                         goto out;
                 }
-
-                shd_enabled = dict_get_str_boolean (vol_opts,
-                                                    "cluster.self-heal-daemon",
-                                                    _gf_true);
                 if (!shd_enabled) {
                         ret = -1;
                         snprintf (msg, sizeof (msg),
@@ -3061,13 +3064,14 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
         uint32_t                cmd             = 0;
         char                   *volname         = NULL;
         char                   *brick           = NULL;
+        char                   *shd_key         = NULL;
         xlator_t               *this            = NULL;
         glusterd_volinfo_t     *volinfo         = NULL;
         glusterd_brickinfo_t   *brickinfo       = NULL;
         glusterd_conf_t        *priv            = NULL;
         dict_t                 *vol_opts        = NULL;
         gf_boolean_t            nfs_disabled    = _gf_false;
-        gf_boolean_t            shd_enabled     = _gf_true;
+        gf_boolean_t            shd_enabled     = _gf_false;
         gf_boolean_t            origin_glusterd = _gf_false;
 
         this = THIS;
@@ -3233,12 +3237,13 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
                                 other_count++;
                                 node_count++;
                         }
-
-                        shd_enabled = dict_get_str_boolean
-                                        (vol_opts, "cluster.self-heal-daemon",
-                                         _gf_true);
-                        if (glusterd_is_volume_replicate (volinfo)
-                            && shd_enabled) {
+                        if (glusterd_is_shd_compatible_volume (volinfo)) {
+                                shd_key = volgen_get_shd_key (volinfo);
+                                shd_enabled = dict_get_str_boolean (vol_opts,
+                                                     shd_key,
+                                                     _gf_true);
+                        }
+                        if (shd_enabled) {
                                 ret = glusterd_add_node_to_dict
                                         (priv->shd_svc.name, rsp_dict,
                                          other_index, vol_opts);
@@ -3248,6 +3253,7 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
                                 node_count++;
                                 other_index++;
                         }
+
                         if (glusterd_is_volume_quota_enabled (volinfo)) {
                                 ret = glusterd_add_node_to_dict
                                                         (priv->quotad_svc.name,
```
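For readers skimming the patch, the staging-side change reduces to the pattern sketched below. This is an illustrative sketch only, not code from the tree: the helper name shd_status_allowed and the "disabled" error text are made up here, and the sketch assumes the glusterd internal headers and helpers that the diff above already relies on (glusterd_is_shd_compatible_volume, volgen_get_shd_key, dict_get_str_boolean).

```c
/* Illustrative sketch only (hypothetical helper, not part of the patch):
 * how the staging check decides whether SHD status can be reported. */
#include "glusterd-utils.h"   /* glusterd_is_shd_compatible_volume() */
#include "glusterd-volgen.h"  /* volgen_get_shd_key() */

static int
shd_status_allowed (glusterd_volinfo_t *volinfo, dict_t *vol_opts,
                    char *volname, char *msg, size_t msglen)
{
        char         *shd_key     = NULL;
        gf_boolean_t  shd_enabled = _gf_false;

        /* Both replicate and disperse volumes are self-heal compatible,
         * so the old glusterd_is_volume_replicate() check was too narrow. */
        if (!glusterd_is_shd_compatible_volume (volinfo)) {
                snprintf (msg, msglen,
                          "Volume %s is not Self-heal compatible", volname);
                return -1;
        }

        /* The SHD option key is looked up via volgen_get_shd_key() so that
         * disperse volumes use their own key instead of the hard-coded
         * "cluster.self-heal-daemon". */
        shd_key = volgen_get_shd_key (volinfo);
        shd_enabled = dict_get_str_boolean (vol_opts, shd_key, _gf_true);
        if (!shd_enabled) {
                snprintf (msg, msglen,
                          "Self Heal Daemon is disabled for volume %s",
                          volname);
                return -1;
        }

        return 0;
}
```

Note the _gf_true default passed to dict_get_str_boolean: as in the patch, the daemon is treated as enabled unless the option has been explicitly turned off.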
