|  |  |  |
|---|---|---|
| author | Raghavendra Bhat <raghavendra@redhat.com> | 2014-06-19 15:51:39 +0530 |
| committer | Kaushal M <kaushal@redhat.com> | 2014-06-30 22:30:58 -0700 |
| commit | c6f040524d75011c44dcc9afdfef80c60c78f7f7 (patch) | |
| tree | 8c1dd960693a98e07088d7026c7e98b7d8b8c8c2 /xlators | |
| parent | 2417de9c37d83e36567551dc682bb23f851fd2d7 (diff) | |
mgmt/glusterd: display snapd status as part of volume status
* Made changes to save the port used by snapd in a per-volume store file,
  i.e. <glusterd-working-directory>/vols/<volname>/snapd.info (a rough sketch
  of the key/value format is shown below).
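As an illustration only, here is a minimal standalone sketch of that on-disk format: a single `key=value` line, `snapd-port=<port>`, written to and read back from a `snapd.info` file. It deliberately uses plain stdio instead of glusterd's `gf_store_*` API; the helper names (`snapd_port_save`, `snapd_port_load`) and the `/tmp` path are hypothetical and exist only for this example, while the `snapd-port` key itself matches `GLUSTERD_STORE_KEY_SNAPD_PORT` in the diff below.

```c
/* Minimal sketch (not glusterd code): persist and reload a snapd port
 * as a "snapd-port=<port>" line, mirroring the key used by this patch. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: write the port to the given store file. */
static int snapd_port_save (const char *path, int port)
{
        FILE *fp = fopen (path, "w");
        if (!fp)
                return -1;
        fprintf (fp, "snapd-port=%d\n", port);
        fclose (fp);
        return 0;
}

/* Hypothetical helper: read the port back; returns -1 if not found. */
static int snapd_port_load (const char *path)
{
        char  line[256];
        int   port = -1;
        FILE *fp   = fopen (path, "r");
        if (!fp)
                return -1;
        while (fgets (line, sizeof (line), fp)) {
                if (!strncmp (line, "snapd-port=", strlen ("snapd-port=")))
                        port = atoi (line + strlen ("snapd-port="));
        }
        fclose (fp);
        return port;
}

int main (void)
{
        /* stand-in for <glusterd-working-directory>/vols/<volname>/snapd.info */
        const char *path = "/tmp/snapd.info";

        if (snapd_port_save (path, 49156))
                return 1;
        printf ("snapd port: %d\n", snapd_port_load (path));
        return 0;
}
```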
This is how the `gluster volume status` output looks for a volume that has the
uss feature enabled:
```
[root@tatooine ~]# gluster volume status vol
Status of volume: vol
Gluster process                                         Port    Online  Pid
------------------------------------------------------------------------------
Brick tatooine:/export1/vol                             49155   Y       5041
Snapshot Daemon on localhost                            49156   Y       5080
NFS Server on localhost                                 2049    Y       5087

Task Status of Volume vol
------------------------------------------------------------------------------
There are no active volume tasks
```
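Each row of this table is carried from glusterd to the CLI as a set of `brick<N>.*` keys in a response dict; the patch's `glusterd_add_snapd_to_dict ()` helper (see the diff below) publishes the snapshot daemon under that same scheme, with the display name "Snap Daemon", the node UUID as the path, and the port, pid and online status. The sketch below only illustrates that key layout; `emit_snapd_status_keys` is a hypothetical function, and plain `printf` stands in for glusterd's dict API.

```c
/* Minimal sketch (not glusterd code): the brick<N>.* keys that carry one
 * status row, filled with the values shown for the snapshot daemon above. */
#include <stdio.h>

static void emit_snapd_status_keys (int index, int port, int pid, int online)
{
        /* hostname is the display name; path carries the node UUID in glusterd */
        printf ("brick%d.hostname=%s\n", index, "Snap Daemon");
        printf ("brick%d.path=%s\n", index, "<node-uuid>");
        printf ("brick%d.port=%d\n", index, port);
        printf ("brick%d.pid=%d\n", index, pid);
        printf ("brick%d.status=%d\n", index, online);
}

int main (void)
{
        /* index 1 follows the brick at index 0, matching other_index = brick_index + 1 */
        emit_snapd_status_keys (1, 49156, 5080, 1);
        return 0;
}
```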
Change-Id: I8f3e5d7d764a728497c2a5279a07486317bd7c6d
BUG: 1111041
Signed-off-by: Raghavendra Bhat <raghavendra@redhat.com>
Reviewed-on: http://review.gluster.org/8114
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Kaushal M <kaushal@redhat.com>
Diffstat (limited to 'xlators')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-handler.c | 10 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-op-sm.c | 59 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-rpc-ops.c | 1 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-store.c | 213 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-store.h | 1 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.c | 67 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.h | 4 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd.h | 9 |
8 files changed, 358 insertions, 6 deletions
```diff
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 9d952cec4d7..ef9888b3537 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3830,6 +3830,16 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
                 goto out;
         }
 
+        if ((cmd & GF_CLI_STATUS_SNAPD) &&
+            (conf->op_version < GD_OP_VERSION_3_6_0)) {
+                snprintf (err_str, sizeof (err_str), "The cluster is operating "
+                          "at a lesser version than %d. Getting the status of "
+                          "snapd is not allowed in this state",
+                          GD_OP_VERSION_3_6_0);
+                ret = -1;
+                goto out;
+        }
+
         ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
 out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index c9776366715..7174a9376de 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1262,6 +1262,16 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
+        if ((cmd & GF_CLI_STATUS_SNAPD) &&
+            (priv->op_version < GD_OP_VERSION_3_6_0)) {
+                snprintf (msg, sizeof (msg), "The cluster is operating at "
+                          "version less than %d. Getting the "
+                          "status of snapd is not allowed in this state.",
+                          GD_OP_VERSION_3_6_0);
+                ret = -1;
+                goto out;
+        }
+
         ret = dict_get_str (dict, "volname", &volname);
         if (ret) {
                 gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
@@ -1325,6 +1335,13 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
                                   "quota enabled", volname);
                         goto out;
                 }
+        } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
+                if (!glusterd_is_snapd_enabled (volinfo)) {
+                        ret = -1;
+                        snprintf (msg, sizeof (msg), "Volume %s does not have "
+                                  "uss enabled", volname);
+                        goto out;
+                }
         } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
                 ret = dict_get_str (dict, "brick", &brick);
                 if (ret)
@@ -2661,7 +2678,13 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
                         goto out;
                 other_count++;
                 node_count++;
-
+        } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
+                ret = glusterd_add_node_to_dict ("snapd", rsp_dict, 0,
+                                                 vol_opts);
+                if (ret)
+                        goto out;
+                other_count++;
+                node_count++;
         } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
                 ret = dict_get_str (dict, "brick", &brick);
                 if (ret)
@@ -2708,6 +2731,16 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
                 if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
                         other_index = brick_index + 1;
 
+                        if (glusterd_is_snapd_enabled (volinfo)) {
+                                ret = glusterd_add_snapd_to_dict (volinfo,
+                                                                  rsp_dict,
+                                                                  other_index);
+                                if (ret)
+                                        goto out;
+                                other_count++;
+                                other_index++;
+                                node_count++;
+                        }
                         nfs_disabled = dict_get_str_boolean (vol_opts,
                                                              "nfs.disable",
@@ -5717,6 +5750,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
         glusterd_pending_node_t *pending_node = NULL;
         xlator_t                *this = NULL;
         glusterd_conf_t         *priv = NULL;
+        glusterd_snapd_t        *snapd = NULL;
 
         GF_ASSERT (dict);
@@ -5743,6 +5777,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
         case GF_CLI_STATUS_NFS:
         case GF_CLI_STATUS_SHD:
         case GF_CLI_STATUS_QUOTAD:
+        case GF_CLI_STATUS_SNAPD:
                 break;
         default:
                 goto out;
@@ -5843,6 +5878,28 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
                 list_add_tail (&pending_node->list, selected);
                 ret = 0;
 
+        } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
+                if (!glusterd_is_snapd_online (volinfo)) {
+                        gf_log (this->name, GF_LOG_ERROR, "snapd is not "
+                                "running");
+                        ret = -1;
+                        goto out;
+                }
+                pending_node = GF_CALLOC (1, sizeof (*pending_node),
+                                          gf_gld_mt_pending_node_t);
+                if (!pending_node) {
+                        gf_log (this->name, GF_LOG_ERROR, "failed to allocate "
+                                "memory for pending node");
+                        ret = -1;
+                        goto out;
+                }
+
+                pending_node->node = (void *)(&volinfo->snapd);
+                pending_node->type = GD_NODE_SNAPD;
+                pending_node->index = 0;
+                list_add_tail (&pending_node->list, selected);
+
+                ret = 0;
         } else {
                 list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                         brick_index++;
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 7c7be9355b8..10d5d7f2752 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -1886,6 +1886,7 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
                 if ((pending_node->type == GD_NODE_NFS) ||
                     (pending_node->type == GD_NODE_QUOTAD) ||
+                    (pending_node->type == GD_NODE_SNAPD) ||
                     ((pending_node->type == GD_NODE_SHD) &&
                      (req_ctx->op == GD_OP_STATUS_VOLUME)))
                         ret = glusterd_node_op_build_payload
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 839d77b7f8d..0177ed169fc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -131,6 +131,24 @@ glusterd_store_brickinfopath_set (glusterd_volinfo_t *volinfo,
         snprintf (brickpath, len, "%s/%s", brickdirpath, brickfname);
 }
 
+static void
+glusterd_store_snapd_path_set (glusterd_volinfo_t *volinfo,
+                               char *snapd_path, size_t len)
+{
+        char                    volpath[PATH_MAX] = {0, };
+        glusterd_conf_t         *priv = NULL;
+
+        GF_ASSERT (volinfo);
+        GF_ASSERT (len >= PATH_MAX);
+
+        priv = THIS->private;
+        GF_ASSERT (priv);
+
+        GLUSTERD_GET_VOLUME_DIR (volpath, volinfo, priv);
+
+        snprintf (snapd_path, len, "%s/snapd.info", volpath);
+}
+
 gf_boolean_t
 glusterd_store_is_valid_brickpath (char *volname, char *brick)
 {
@@ -249,6 +267,21 @@ glusterd_store_create_brick_shandle_on_absence (glusterd_volinfo_t *volinfo,
         return ret;
 }
 
+int32_t
+glusterd_store_create_snapd_shandle_on_absence (glusterd_volinfo_t *volinfo)
+{
+        char                    snapd_path[PATH_MAX] = {0,};
+        int32_t                 ret = 0;
+
+        GF_ASSERT (volinfo);
+
+        glusterd_store_snapd_path_set (volinfo, snapd_path,
+                                       sizeof (snapd_path));
+        ret = gf_store_handle_create_on_absence (&volinfo->snapd.handle,
+                                                 snapd_path);
+        return ret;
+}
+
 /* Store the bricks snapshot details only if required
  *
  * The snapshot details will be stored only if the cluster op-version is
@@ -353,6 +386,30 @@ out:
 }
 
 int32_t
+glusterd_store_snapd_write (int fd, glusterd_volinfo_t *volinfo)
+{
+        char                    value[256] = {0,};
+        int32_t                 ret        = 0;
+        xlator_t               *this       = NULL;
+
+        GF_ASSERT (volinfo);
+        GF_ASSERT (fd > 0);
+
+        this = THIS;
+        GF_ASSERT (this);
+
+        snprintf (value, sizeof(value), "%d", volinfo->snapd.port);
+        ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAPD_PORT, value);
+        if (ret)
+                gf_log (this->name, GF_LOG_ERROR, "failed to store the snapd "
+                        "port of volume %s", volinfo->volname);
+
+
+        gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+int32_t
 glusterd_store_perform_brick_store (glusterd_brickinfo_t *brickinfo)
 {
         int                         fd = -1;
@@ -377,6 +434,42 @@ out:
 }
 
 int32_t
+glusterd_store_perform_snapd_store (glusterd_volinfo_t *volinfo)
+{
+        int                         fd  = -1;
+        int32_t                     ret = -1;
+        xlator_t                  *this = NULL;
+
+        GF_ASSERT (volinfo);
+
+        this = THIS;
+        GF_ASSERT (this);
+
+        fd = gf_store_mkstemp (volinfo->snapd.handle);
+        if (fd <= 0) {
+                gf_log (this->name, GF_LOG_ERROR, "failed to create the "
+                        "temporary file for the snapd store handle of volume "
+                        "%s", volinfo->volname);
+                goto out;
+        }
+
+        ret = glusterd_store_snapd_write (fd, volinfo);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "failed to write snapd port "
+                        "info to store handle (volume: %s", volinfo->volname);
+                goto out;
+        }
+
+        ret = gf_store_rename_tmppath (volinfo->snapd.handle);
+
+out:
+        if (ret && (fd > 0))
+                gf_store_unlink_tmppath (volinfo->snapd.handle);
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+int32_t
 glusterd_store_brickinfo (glusterd_volinfo_t *volinfo,
                           glusterd_brickinfo_t *brickinfo, int32_t brick_count,
                           int vol_fd)
@@ -407,6 +500,37 @@ out:
 }
 
 int32_t
+glusterd_store_snapd_info (glusterd_volinfo_t *volinfo)
+{
+        int32_t                 ret  = -1;
+        xlator_t               *this = NULL;
+
+        GF_ASSERT (volinfo);
+
+        this = THIS;
+        GF_ASSERT (this);
+
+        ret = glusterd_store_create_snapd_shandle_on_absence (volinfo);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "failed to create store "
+                        "handle for snapd (volume: %s)", volinfo->volname);
+                goto out;
+        }
+
+        ret = glusterd_store_perform_snapd_store (volinfo);
+        if (ret)
+                gf_log (this->name, GF_LOG_ERROR, "failed to store snapd info "
+                        "of the volume %s", volinfo->volname);
+
+out:
+        if (ret)
+                gf_store_unlink_tmppath (volinfo->snapd.handle);
+
+        gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
+        return ret;
+}
+
+int32_t
 glusterd_store_delete_brick (glusterd_brickinfo_t *brickinfo, char *delete_path)
 {
         int32_t                 ret = -1;
@@ -654,6 +778,11 @@ glusterd_volume_write_snap_details (int fd, glusterd_volinfo_t *volinfo)
                 goto out;
         }
 
+        ret = glusterd_store_snapd_info (volinfo);
+        if (ret)
+                gf_log (this->name, GF_LOG_ERROR, "snapd info store failed "
+                        "volume: %s", volinfo->volname);
+
 out:
         if (ret)
                 gf_log (this->name, GF_LOG_ERROR, "Failed to write snap details"
@@ -1329,6 +1458,8 @@ glusterd_store_volume_cleanup_tmp (glusterd_volinfo_t *volinfo)
         gf_store_unlink_tmppath (volinfo->rb_shandle);
 
         gf_store_unlink_tmppath (volinfo->node_state_shandle);
+
+        gf_store_unlink_tmppath (volinfo->snapd.handle);
 }
 
 int32_t
@@ -1948,6 +2079,81 @@ out:
         return ret;
 }
 
+int
+glusterd_store_retrieve_snapd (glusterd_volinfo_t *volinfo)
+{
+        int                     ret                     = -1;
+        int                     exists                  = 0;
+        char                    *key                    = NULL;
+        char                    *value                  = NULL;
+        char                    volpath[PATH_MAX]       = {0,};
+        char                    path[PATH_MAX]          = {0,};
+        xlator_t                *this                   = NULL;
+        glusterd_conf_t         *conf                   = NULL;
+        gf_store_iter_t         *iter                   = NULL;
+        gf_store_op_errno_t     op_errno                = GD_STORE_SUCCESS;
+
+        this = THIS;
+        GF_ASSERT (this);
+        conf = THIS->private;
+        GF_ASSERT (volinfo);
+
+        if (conf->op_version < GD_OP_VERSION_3_6_0) {
+                ret = 0;
+                goto out;
+        }
+
+        GLUSTERD_GET_VOLUME_DIR(volpath, volinfo, conf);
+
+        snprintf (path, sizeof (path), "%s/%s", volpath,
+                  GLUSTERD_VOLUME_SNAPD_INFO_FILE);
+
+        ret = gf_store_handle_retrieve (path, &volinfo->snapd.handle);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "volinfo handle is NULL");
+                goto out;
+        }
+
+        ret = gf_store_iter_new (volinfo->snapd.handle, &iter);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to get new store "
+                        "iter");
+                goto out;
+        }
+
+        ret = gf_store_iter_get_next (iter, &key, &value, &op_errno);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to get next store "
+                        "iter");
+                goto out;
+        }
+
+        while (!ret) {
+                if (!strncmp (key, GLUSTERD_STORE_KEY_SNAPD_PORT,
+                              strlen (GLUSTERD_STORE_KEY_SNAPD_PORT))) {
+                        volinfo->snapd.port = atoi (value);
+                }
+
+                ret = gf_store_iter_get_next (iter, &key, &value,
+                                              &op_errno);
+        }
+
+        if (op_errno != GD_STORE_EOF)
+                goto out;
+
+        ret = gf_store_iter_destroy (iter);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to destroy store "
+                        "iter");
+                goto out;
+        }
+
+        ret = 0;
+
+out:
+        return ret;
+}
+
 int32_t
 glusterd_store_retrieve_bricks (glusterd_volinfo_t *volinfo)
 {
@@ -2452,7 +2658,8 @@ glusterd_store_update_volinfo (glusterd_volinfo_t *volinfo)
                                         "failed to parse restored snap's uuid");
                 } else if (!strncmp (key, GLUSTERD_STORE_KEY_PARENT_VOLNAME,
                                 strlen (GLUSTERD_STORE_KEY_PARENT_VOLNAME))) {
-                        strncpy (volinfo->parent_volname, value, sizeof(volinfo->parent_volname) - 1);
+                        strncpy (volinfo->parent_volname, value,
+                                 sizeof(volinfo->parent_volname) - 1);
                 } else {
 
                         if (is_key_glusterd_hooks_friendly (key)) {
@@ -2598,6 +2805,10 @@ glusterd_store_retrieve_volume (char *volname, glusterd_snap_t *snap)
         if (ret)
                 goto out;
 
+        ret = glusterd_store_retrieve_snapd (volinfo);
+        if (ret)
+                goto out;
+
         ret = glusterd_compute_cksum (volinfo, _gf_false);
         if (ret)
                 goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h
index ba3662bc7b2..4c0f0d42321 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.h
+++ b/xlators/mgmt/glusterd/src/glusterd-store.h
@@ -71,6 +71,7 @@ typedef enum glusterd_store_ver_ac_{
 #define GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT  "snap-max-hard-limit"
 #define GLUSTERD_STORE_KEY_SNAP_AUTO_DELETE     "auto-delete"
 #define GLUSTERD_STORE_KEY_SNAP_MAX_SOFT_LIMIT  "snap-max-soft-limit"
+#define GLUSTERD_STORE_KEY_SNAPD_PORT           "snapd-port"
 
 #define GLUSTERD_STORE_KEY_BRICK_HOSTNAME       "hostname"
 #define GLUSTERD_STORE_KEY_BRICK_PATH           "path"
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 6ff444862a4..04a2e62ee65 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -5781,6 +5781,7 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
         glusterd_volinfo_t      *volinfo   = NULL;
         nodesrv_t               *nfs       = NULL;
         nodesrv_t               *quotad    = NULL;
+        glusterd_snapd_t        *snapd     = NULL;
 
         GF_VALIDATE_OR_GOTO (THIS->name, pending_node, out);
         GF_VALIDATE_OR_GOTO (THIS->name, pending_node->node, out);
@@ -5805,7 +5806,9 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
         } else if (pending_node->type == GD_NODE_QUOTAD) {
                 quotad = pending_node->node;
                 rpc = quotad->rpc;
-
+        } else if (pending_node->type == GD_NODE_SNAPD) {
+                snapd = pending_node->node;
+                rpc = quotad->rpc;
         } else {
                 GF_ASSERT (0);
         }
@@ -7495,6 +7498,67 @@ out:
 }
 
 int32_t
+glusterd_add_snapd_to_dict (glusterd_volinfo_t *volinfo,
+                            dict_t  *dict, int32_t count)
+{
+
+        int             ret                   = -1;
+        int32_t         pid                   = -1;
+        int32_t         brick_online          = -1;
+        char            key[1024]             = {0};
+        char            base_key[1024]        = {0};
+        char            pidfile[PATH_MAX]     = {0};
+        xlator_t        *this                 = NULL;
+        glusterd_conf_t *priv                 = NULL;
+
+
+        GF_ASSERT (volinfo);
+        GF_ASSERT (dict);
+
+        this = THIS;
+        GF_ASSERT (this);
+
+        priv = this->private;
+
+        snprintf (base_key, sizeof (base_key), "brick%d", count);
+        snprintf (key, sizeof (key), "%s.hostname", base_key);
+        ret = dict_set_str (dict, key, "Snap Daemon");
+        if (ret)
+                goto out;
+
+        snprintf (key, sizeof (key), "%s.path", base_key);
+        ret = dict_set_dynstr (dict, key, gf_strdup (uuid_utoa (MY_UUID)));
+        if (ret)
+                goto out;
+
+        memset (key, 0, sizeof (key));
+        snprintf (key, sizeof (key), "%s.port", base_key);
+        ret = dict_set_int32 (dict, key, volinfo->snapd.port);
+        if (ret)
+                goto out;
+
+        glusterd_get_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
+
+        brick_online = gf_is_service_running (pidfile, &pid);
+
+        memset (key, 0, sizeof (key));
+        snprintf (key, sizeof (key), "%s.pid", base_key);
+        ret = dict_set_int32 (dict, key, pid);
+        if (ret)
+                goto out;
+
+        memset (key, 0, sizeof (key));
+        snprintf (key, sizeof (key), "%s.status", base_key);
+        ret = dict_set_int32 (dict, key, brick_online);
+
+out:
+        if (ret)
+                gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+        return ret;
+}
+
+int32_t
 glusterd_get_all_volnames (dict_t *dict)
 {
         int                    ret        = -1;
@@ -13497,6 +13561,7 @@ glusterd_handle_snapd_option (glusterd_volinfo_t *volinfo)
                                 volinfo->volname);
                         goto out;
                 }
+                volinfo->snapd.port = 0;
         }
 
 out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 320bc20cdd2..3edb0c55db4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -442,6 +442,10 @@ glusterd_add_brick_to_dict (glusterd_volinfo_t *volinfo,
                             dict_t  *dict, int32_t count);
 
 int32_t
+glusterd_add_snapd_to_dict (glusterd_volinfo_t *volinfo,
+                            dict_t  *dict, int32_t count);
+
+int32_t
 glusterd_get_all_volnames (dict_t *dict);
 
 gf_boolean_t
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index b8e8933258a..a8ecb505a5b 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -123,9 +123,10 @@ typedef struct {
 } nodesrv_t;
 
 typedef struct {
-        struct rpc_clnt  *rpc;
-        int               port;
-        gf_boolean_t      online;
+        struct rpc_clnt   *rpc;
+        int                port;
+        gf_boolean_t       online;
+        gf_store_handle_t *handle;
 } glusterd_snapd_t;
 
 typedef struct {
@@ -426,6 +427,7 @@ typedef enum gd_node_type_ {
         GD_NODE_REBALANCE,
         GD_NODE_NFS,
         GD_NODE_QUOTAD,
+        GD_NODE_SNAPD,
 } gd_node_type;
 
 typedef enum missed_snap_stat {
@@ -466,6 +468,7 @@ enum glusterd_vol_comp_status_ {
 #define GLUSTERD_VOLUME_DIR_PREFIX "vols"
 #define GLUSTERD_PEER_DIR_PREFIX "peers"
 #define GLUSTERD_VOLUME_INFO_FILE "info"
+#define GLUSTERD_VOLUME_SNAPD_INFO_FILE "snapd.info"
 #define GLUSTERD_SNAP_INFO_FILE "info"
 #define GLUSTERD_VOLUME_RBSTATE_FILE "rbstate"
 #define GLUSTERD_BRICK_INFO_DIR "bricks"
```
