 cli/src/cli-cmd-parser.c                                              |  16
 cli/src/cli-cmd-volume.c                                              |  28
 cli/src/cli-rpc-ops.c                                                 |   8
 heal/src/glfs-heal.c                                                  | 180
 rpc/rpc-lib/src/protocol-common.h                                     |   2
 tests/basic/afr/granular-esh/add-brick.t                              |   2
 tests/basic/afr/granular-esh/cli.t                                    | 142
 tests/basic/afr/granular-esh/conservative-merge.t                     |   4
 tests/basic/afr/granular-esh/granular-esh.t                           |   2
 tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t |   4
 tests/basic/afr/granular-esh/replace-brick.t                          |   2
 xlators/mgmt/glusterd/src/glusterd-op-sm.c                            |  19
 xlators/mgmt/glusterd/src/glusterd-volume-ops.c                       |  61
13 files changed, 407 insertions, 63 deletions
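
Taken together, the changes below move granular entry-heal toggling from 'volume set' to the 'volume heal' command, letting glusterd refuse an enable while entries are still pending heal or bricks are down. A minimal usage sketch of the new command surface, assuming a started replicate volume named testvol (the volume name is hypothetical):

    # New path: toggle the option through the heal command; 'enable' first
    # runs glfsheal to verify that no entries are pending heal on any
    # replicate subvolume
    gluster volume heal testvol granular-entry-heal enable
    gluster volume heal testvol granular-entry-heal disable

    # Old path: volume-set is now rejected once the volume leaves the
    # 'Created' state (see the glusterd-op-sm.c hunk below)
    gluster volume set testvol cluster.granular-entry-heal on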
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index e0eb8cff413..8a446595e79 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -3611,7 +3611,8 @@ cli_cmd_volume_heal_options_parse (const char **words, int wordcount,
         if (wordcount == 5) {
                 if (strcmp (words[3], "info") &&
-                    strcmp (words[3], "statistics")) {
+                    strcmp (words[3], "statistics") &&
+                    strcmp (words[3], "granular-entry-heal")) {
                         ret = -1;
                         goto out;
                 }
@@ -3641,6 +3642,19 @@ cli_cmd_volume_heal_options_parse (const char **words, int wordcount,
                                 goto done;
                         }
                 }
+
+                if (!strcmp (words[3], "granular-entry-heal")) {
+                        if (!strcmp (words[4], "enable")) {
+                                ret = dict_set_int32 (dict, "heal-op",
+                                          GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE);
+                                goto done;
+                        } else if (!strcmp (words[4], "disable")) {
+                                ret = dict_set_int32 (dict, "heal-op",
+                                         GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE);
+                                goto done;
+                        }
+                }
+
                 ret = -1;
                 goto out;
         }
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index c721171f517..0d25279f381 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -2151,7 +2151,8 @@ cli_print_brick_status (cli_volume_status_t *status)
                              (op == GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME) ||\
                              (op == GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) ||      \
                              (op == GF_SHD_OP_INDEX_SUMMARY) ||               \
-                             (op == GF_SHD_OP_SPLIT_BRAIN_FILES))
+                             (op == GF_SHD_OP_SPLIT_BRAIN_FILES) ||           \
+                             (op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE))
 
 int
 cli_launch_glfs_heal (int heal_op, dict_t *options)
@@ -2200,6 +2201,10 @@ cli_launch_glfs_heal (int heal_op, dict_t *options)
                         runner_add_args (&runner, "xml", NULL);
                 }
                 break;
+        case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE:
+        case GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE:
+                runner_add_args (&runner, "granular-entry-heal-op", NULL);
+                break;
         default:
                 ret = -1;
         }
@@ -2211,11 +2216,11 @@ cli_launch_glfs_heal (int heal_op, dict_t *options)
                 printf ("%s", out);
         }
         ret = runner_end (&runner);
-        ret = WEXITSTATUS (ret);
 out:
         return ret;
 }
 
+
 int
 cli_cmd_volume_heal_cbk (struct cli_state *state, struct cli_cmd_word *word,
                          const char **words, int wordcount)
@@ -2252,19 +2257,19 @@ cli_cmd_volume_heal_cbk (struct cli_state *state, struct cli_cmd_word *word,
                 goto out;
 
         if (NEEDS_GLFS_HEAL (heal_op)) {
                 ret = cli_launch_glfs_heal (heal_op, options);
-                if (ret == -1)
+                if (ret < 0)
+                        goto out;
+                if (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)
                         goto out;
         }
-        else {
-                proc = &cli_rpc_prog->proctable[GLUSTER_CLI_HEAL_VOLUME];
-                CLI_LOCAL_INIT (local, words, frame, options);
+        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_HEAL_VOLUME];
 
-                if (proc->fn) {
-                        ret = proc->fn (frame, THIS, options);
-                }
-        }
+        CLI_LOCAL_INIT (local, words, frame, options);
 
+        if (proc->fn) {
+                ret = proc->fn (frame, THIS, options);
+        }
 
 out:
         if (ret) {
                 cli_cmd_sent_status_get (&sent);
@@ -2673,7 +2678,8 @@ struct cli_cmd volume_cmds[] = {
           "statistics [heal-count [replica <HOSTNAME:BRICKNAME>]] |"
           "info [healed | heal-failed | split-brain] |"
           "split-brain {bigger-file <FILE> | latest-mtime <FILE> |"
-                       "source-brick <HOSTNAME:BRICKNAME> [<FILE>]}]",
+                       "source-brick <HOSTNAME:BRICKNAME> [<FILE>]} |"
+          "granular-entry-heal {enable | disable}]",
           cli_cmd_volume_heal_cbk,
           "self-heal commands on volume specified by <VOLNAME>"},
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index d88ddd7f136..602d3ff1611 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -8670,6 +8670,14 @@ gf_cli_heal_volume_cbk (struct rpc_req *req, struct iovec *iov,
                 operation   = "";
                 heal_op_str = "Disable heal";
                 break;
+        case    GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE:
+                operation   = "";
+                heal_op_str = "Enable granular entry heal";
+                break;
+        case    GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE:
+                operation   = "";
+                heal_op_str = "Disable granular entry heal";
+                break;
         }
 
         if (rsp.op_ret) {
diff --git a/heal/src/glfs-heal.c b/heal/src/glfs-heal.c
index 0a880cb752e..a306d8962fa 100644
--- a/heal/src/glfs-heal.c
+++ b/heal/src/glfs-heal.c
@@ -39,7 +39,7 @@ xmlDocPtr        glfsh_doc = NULL;
                         ret = 0;                                \
         } while (0)                                             \
 
-typedef void    (*print_status) (dict_t *, char *, uuid_t, uint64_t *,
+typedef int    (*print_status) (dict_t *, char *, uuid_t, uint64_t *,
                  gf_boolean_t flag);
 
 int glfsh_heal_splitbrain_file (glfs_t *fs, xlator_t *top_subvol,
@@ -65,6 +65,11 @@ int32_t is_xml;
                   "source-brick <HOSTNAME:BRICKNAME> [<FILE>] | "\
                   "split-brain-info]\n"
 
+typedef enum {
+        GLFSH_MODE_CONTINUE_ON_ERROR = 1,
+        GLFSH_MODE_EXIT_ON_FIRST_FAILURE,
+} glfsh_fail_mode_t;
+
 int
 glfsh_init ()
 {
@@ -72,6 +77,30 @@ glfsh_init ()
 }
 
 int
+glfsh_end_op_granular_entry_heal (int op_ret, char *op_errstr)
+{
+        /* If an error string is available, give it higher precedence. */
+
+        if (op_errstr) {
+                printf ("%s\n", op_errstr);
+        } else if (op_ret < 0) {
+                if (op_ret == -EAGAIN)
+                        printf ("One or more entries need heal. Please execute "
+                                "the command again after there are no entries "
+                                "to be healed\n");
+                else if (op_ret == -ENOTCONN)
+                        printf ("One or more bricks could be down. Please "
+                                "execute the command again after bringing all "
+                                "bricks online and finishing any pending "
+                                "heals\n");
+                else
+                        printf ("Command failed - %s. Please check the logs for"
+                                " more details\n", strerror (-op_ret));
+        }
+        return 0;
+}
+
+int
 glfsh_end (int op_ret, char *op_errstr)
 {
         if (op_errstr)
@@ -87,6 +116,12 @@ glfsh_print_hr_spb_status (char *path, uuid_t gfid, char *status)
 }
 
 void
+glfsh_no_print_hr_heal_status (char *path, uuid_t gfid, char *status)
+{
+        return;
+}
+
+void
 glfsh_print_hr_heal_status (char *path, uuid_t gfid, char *status)
 {
         printf ("%s%s\n", path, status);
@@ -291,6 +326,12 @@ out:
 }
 
 int
+glfsh_no_print_hr_heal_op_status (int ret, uint64_t num_entries, char *fmt_str)
+{
+        return 0;
+}
+
+int
 glfsh_print_hr_heal_op_status (int ret, uint64_t num_entries, char *fmt_str)
 {
         if (ret < 0 && num_entries == 0) {
@@ -417,7 +458,7 @@ glfsh_index_purge (xlator_t *subvol, inode_t *inode, char *name)
         return ret;
 }
 
-void
+int
 glfsh_print_spb_status (dict_t *dict, char *path, uuid_t gfid,
                         uint64_t *num_entries, gf_boolean_t flag)
 {
@@ -429,7 +470,7 @@ glfsh_print_spb_status (dict_t *dict, char *path, uuid_t gfid,
 
         ret = dict_get_str (dict, "heal-info", &value);
         if (ret)
-                return;
+                return 0;
 
         if (!strcmp (value, "split-brain")) {
                 split_b = _gf_true;
@@ -451,10 +492,10 @@ glfsh_print_spb_status (dict_t *dict, char *path, uuid_t gfid,
                                                 gfid, NULL);
                 }
         }
-        return;
+        return 0;
 }
 
-void
+int
 glfsh_print_heal_status (dict_t *dict, char *path, uuid_t gfid,
                          uint64_t *num_entries, gf_boolean_t ignore_dirty)
 {
@@ -466,7 +507,7 @@ glfsh_print_heal_status (dict_t *dict, char *path, uuid_t gfid,
 
         ret = dict_get_str (dict, "heal-info", &value);
         if (ret || (!strcmp (value, "no-heal")))
-                return;
+                return 0;
 
         if (!strcmp (value, "heal")) {
                 ret = gf_asprintf (&status, " ");
@@ -509,7 +550,7 @@ out:
                 if (pending) {
                         GF_FREE (status);
                         status = NULL;
-                        return;
+                        return 0;
                 }
         }
         if (ret == -1)
@@ -522,7 +563,21 @@ out:
                                          status ? status : "");
         GF_FREE (status);
-        return;
+        return 0;
+}
+
+int
+glfsh_heal_status_boolean (dict_t *dict, char *path, uuid_t gfid,
+                           uint64_t *num_entries, gf_boolean_t ignore_dirty)
+{
+        int             ret             = 0;
+        char            *value          = NULL;
+
+        ret = dict_get_str (dict, "heal-info", &value);
+        if ((!ret) && (!strcmp (value, "no-heal")))
+                return 0;
+        else
+                return -1;
 }
 
 static int
@@ -556,11 +611,12 @@ static int
 glfsh_process_entries (xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
                        uint64_t *offset, uint64_t *num_entries,
                        print_status glfsh_print_status,
-                       gf_boolean_t ignore_dirty)
+                       gf_boolean_t ignore_dirty, glfsh_fail_mode_t mode)
 {
         gf_dirent_t      *entry = NULL;
         gf_dirent_t      *tmp = NULL;
         int              ret = 0;
+        int              print_status = 0;
         char            *path = NULL;
         uuid_t          gfid = {0};
         xlator_t        *this = NULL;
@@ -586,8 +642,13 @@ glfsh_process_entries (xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
                 gf_uuid_copy (loc.gfid, gfid);
                 ret = syncop_getxattr (this, &loc, &dict, GF_HEAL_INFO, NULL,
                                        NULL);
-                if (ret)
-                        continue;
+                if (ret) {
+                        if ((mode != GLFSH_MODE_CONTINUE_ON_ERROR) &&
+                            (ret == -ENOTCONN))
+                                goto out;
+                        else
+                                continue;
+                }
 
                 ret = syncop_gfid_to_path (this->itable, xl, gfid, &path);
 
@@ -596,11 +657,19 @@ glfsh_process_entries (xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
                         ret = 0;
                         continue;
                 }
-                if (dict)
-                        glfsh_print_status (dict, path, gfid,
-                                            num_entries, ignore_dirty);
+                if (dict) {
+                        print_status = glfsh_print_status (dict, path, gfid,
+                                                           num_entries,
+                                                           ignore_dirty);
+                        if ((print_status) &&
+                            (mode != GLFSH_MODE_CONTINUE_ON_ERROR)) {
+                                ret = -EAGAIN;
+                                goto out;
+                        }
+                }
         }
         ret = 0;
+out:
         GF_FREE (path);
         if (dict) {
                 dict_unref (dict);
@@ -615,17 +684,21 @@ glfsh_crawl_directory (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
                        dict_t *xattr_req, uint64_t *num_entries,
                        gf_boolean_t ignore)
 {
-        uint64_t        offset = 0;
+        int             ret          = 0;
+        int             heal_op      = -1;
+        uint64_t        offset       = 0;
         gf_dirent_t     entries;
-        int             ret = 0;
         gf_boolean_t    free_entries = _gf_false;
-        int             heal_op = -1;
+        glfsh_fail_mode_t mode = GLFSH_MODE_CONTINUE_ON_ERROR;
 
         INIT_LIST_HEAD (&entries.list);
         ret = dict_get_int32 (xattr_req, "heal-op", &heal_op);
         if (ret)
                 return ret;
+        if (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)
+                mode = GLFSH_MODE_EXIT_ON_FIRST_FAILURE;
+
         while (1) {
                 ret = syncop_readdir (readdir_xl, fd, 131072, offset, &entries,
                                       NULL, NULL);
@@ -642,7 +715,7 @@ glfsh_crawl_directory (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
                                                      &entries, &offset,
                                                      num_entries,
                                                      glfsh_print_heal_status,
-                                                     ignore);
+                                                     ignore, mode);
                         if (ret < 0)
                                 goto out;
                 } else if (heal_op == GF_SHD_OP_SPLIT_BRAIN_FILES) {
@@ -650,13 +723,20 @@ glfsh_crawl_directory (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
                                                      &entries, &offset,
                                                      num_entries,
                                                      glfsh_print_spb_status,
-                                                     ignore);
+                                                     ignore, mode);
                         if (ret < 0)
                                 goto out;
                 } else if (heal_op == GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) {
                         ret = glfsh_heal_entries (fs, top_subvol, rootloc,
                                                   &entries, &offset,
                                                   num_entries, xattr_req);
+                } else if (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) {
+                        ret = glfsh_process_entries (readdir_xl, fd, &entries,
+                                                     &offset, num_entries,
+                                                     glfsh_heal_status_boolean,
+                                                     ignore, mode);
+                        if (ret < 0)
+                                goto out;
                 }
                 gf_dirent_free (&entries);
                 free_entries = _gf_false;
@@ -669,6 +749,12 @@ out:
 }
 
 static int
+glfsh_no_print_brick_from_xl (xlator_t *xl, loc_t *rootloc)
+{
+        return 0;
+}
+
+static int
 glfsh_print_brick_from_xl (xlator_t *xl, loc_t *rootloc)
 {
         char    *remote_host = NULL;
@@ -746,6 +832,13 @@ glfsh_print_pending_heals (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
         if (ret)
                 goto out;
 
+        if ((!is_parent_replicate) &&
+            ((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
+             (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE))) {
+                ret = 0;
+                goto out;
+        }
+
         ret = glfsh_output->print_brick_from_xl (xl, rootloc);
         if (ret < 0)
                 goto out;
@@ -753,6 +846,10 @@ glfsh_print_pending_heals (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
         ret = glfsh_print_pending_heals_type (fs, top_subvol, rootloc, xl,
                                               heal_op, xattr_req,
                                               GF_XATTROP_INDEX_GFID, &count);
+
+        if (ret < 0 && heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)
+                goto out;
+
         total += count;
         count = 0;
         if (ret == -ENOTCONN)
@@ -833,14 +930,14 @@ out:
         return NULL;
 }
 
-
 int
 glfsh_gather_heal_info (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
                         gf_xl_afr_op_t heal_op)
 {
-        xlator_t  *xl       = NULL;
+        int        ret       = 0;
+        xlator_t  *xl        = NULL;
         xlator_t  *heal_xl   = NULL;
-        xlator_t  *old_THIS = NULL;
+        xlator_t  *old_THIS  = NULL;
 
         xl = top_subvol;
         while (xl->next)
@@ -851,20 +948,28 @@ glfsh_gather_heal_info (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
                         if (heal_xl) {
                                 old_THIS = THIS;
                                 THIS = heal_xl;
-                                glfsh_print_pending_heals (fs, top_subvol,
-                                                           rootloc, xl,
-                                                           heal_op,
-                                                           !strcmp
-                                                           (heal_xl->type,
-                                                           "cluster/replicate"));
+                                ret = glfsh_print_pending_heals (fs, top_subvol,
+                                                                 rootloc, xl,
+                                                                 heal_op,
+                                                                 !strcmp
+                                                                (heal_xl->type,
+                                                          "cluster/replicate"));
                                 THIS = old_THIS;
+
+                                if ((ret < 0) &&
+                              (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE))
+                                        goto out;
                         }
                 }
                 xl = xl->prev;
         }
-        return 0;
+out:
+        if (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)
+                ret = 0;
+
+        return ret;
 }
 
 int
@@ -1102,6 +1207,15 @@ glfsh_info_t glfsh_human_readable = {
         .end = glfsh_end
 };
 
+glfsh_info_t glfsh_no_print = {
+        .init = glfsh_init,
+        .print_brick_from_xl = glfsh_no_print_brick_from_xl,
+        .print_heal_op_status = glfsh_no_print_hr_heal_op_status,
+        .print_heal_status = glfsh_no_print_hr_heal_status,
+        .print_spb_status = glfsh_no_print_hr_heal_status,
+        .end = glfsh_end_op_granular_entry_heal
+};
+
 #if (HAVE_LIB_XML)
 glfsh_info_t glfsh_xml_output = {
         .init = glfsh_xml_init,
@@ -1145,6 +1259,8 @@ main (int argc, char **argv)
                 } else if (!strcmp (argv[2], "xml")) {
                         heal_op = GF_SHD_OP_INDEX_SUMMARY;
                         is_xml = 1;
+                } else if (!strcmp (argv[2], "granular-entry-heal-op")) {
+                        heal_op = GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE;
                 } else {
                         printf (USAGE_STR, argv[0]);
                         ret = -1;
@@ -1201,6 +1317,9 @@ main (int argc, char **argv)
         }
 
+        if (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)
+                glfsh_output = &glfsh_no_print;
+
         ret = glfsh_output->init ();
         if (ret)
                 exit (EXIT_FAILURE);
@@ -1277,6 +1396,7 @@ main (int argc, char **argv)
         switch (heal_op) {
         case GF_SHD_OP_INDEX_SUMMARY:
         case GF_SHD_OP_SPLIT_BRAIN_FILES:
+        case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE:
                 ret = glfsh_gather_heal_info (fs, top_subvol, &rootloc,
                                               heal_op);
                 break;
@@ -1295,6 +1415,8 @@ main (int argc, char **argv)
         }
 
         glfsh_output->end (ret, NULL);
+        if (ret < 0)
+                ret = -ret;
         loc_wipe (&rootloc);
         glfs_subvol_done (fs, top_subvol);
         cleanup (fs);
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index 915d358e707..8a178148a10 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -252,6 +252,8 @@ typedef enum {
         GF_SHD_OP_HEAL_ENABLE,
         GF_SHD_OP_HEAL_DISABLE,
         GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME,
+        GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE,
+        GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE,
 } gf_xl_afr_op_t ;
 
 struct gf_gsync_detailed_status_ {
diff --git a/tests/basic/afr/granular-esh/add-brick.t b/tests/basic/afr/granular-esh/add-brick.t
index f3125d7fe7d..270cf1d32a6 100644
--- a/tests/basic/afr/granular-esh/add-brick.t
+++ b/tests/basic/afr/granular-esh/add-brick.t
@@ -14,7 +14,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
 TEST $CLI volume set $V0 cluster.metadata-self-heal off
 TEST $CLI volume set $V0 cluster.entry-self-heal off
 TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
 
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
diff --git a/tests/basic/afr/granular-esh/cli.t b/tests/basic/afr/granular-esh/cli.t
new file mode 100644
index 00000000000..a655180a095
--- /dev/null
+++ b/tests/basic/afr/granular-esh/cli.t
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST   $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+# Test that enabling the option works on a newly created volume
+TEST   $CLI volume set $V0 cluster.granular-entry-heal on
+TEST   $CLI volume set $V0 cluster.granular-entry-heal off
+
+#########################
+##### DISPERSE TEST #####
+#########################
+# Execute the same command on a disperse volume and make sure it fails.
+TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2}
+TEST $CLI volume start $V1
+TEST ! $CLI volume heal $V1 granular-entry-heal enable
+TEST ! $CLI volume heal $V1 granular-entry-heal disable
+
+#######################
+###### TIER TEST ######
+#######################
+# Execute the same command on a disperse + replicate tiered volume and make
+# sure the option is set on the replicate leg of the volume
+TEST $CLI volume attach-tier $V1 replica 2 $H0:$B0/${V1}{3,4}
+TEST $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
+TEST $CLI volume heal $V1 granular-entry-heal disable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+# Kill a disperse brick so that heal is pending on the volume.
+TEST kill_brick $V1 $H0 $B0/${V1}0
+
+# Now make sure that one offline brick in disperse does not affect enabling the
+# option on the volume.
+TEST $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
+TEST $CLI volume heal $V1 granular-entry-heal disable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+# Now kill a replicate brick.
+TEST kill_brick $V1 $H0 $B0/${V1}3
+# Now make sure that one offline brick in replicate causes the command to fail.
+TEST ! $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+######################
+### REPLICATE TEST ###
+######################
+TEST   $CLI volume start $V0
+TEST   $CLI volume set $V0 cluster.data-self-heal off
+TEST   $CLI volume set $V0 cluster.metadata-self-heal off
+TEST   $CLI volume set $V0 cluster.entry-self-heal off
+TEST   $CLI volume set $V0 self-heal-daemon off
+# Test that the volume-set way of enabling the option is disallowed
+TEST ! $CLI volume set $V0 granular-entry-heal on
+# Test that the volume-heal way of enabling the option is allowed
+TEST   $CLI volume heal $V0 granular-entry-heal enable
+# Volume-reset of the option should be allowed
+TEST   $CLI volume reset $V0 granular-entry-heal
+TEST   $CLI volume heal $V0 granular-entry-heal enable
+
+EXPECT "enable" volume_option $V0 cluster.granular-entry-heal
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Disabling the option should work even when one or more bricks are down
+TEST $CLI volume heal $V0 granular-entry-heal disable
+# When a brick is down, an 'enable' attempt should fail
+TEST ! $CLI volume heal $V0 granular-entry-heal enable
+
+# Restart the killed brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# When all bricks are up, it should be possible to enable the option
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+# Kill brick-0 again
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Create files under root
+for i in {1..2}
+do
+        echo $i > $M0/f$i
+done
+
+# Test that the index associated with '/' is created on B1.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Check for successful creation of granular entry indices
+for i in {1..2}
+do
+        TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Test if data was healed
+for i in {1..2}
+do
+        TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Perform a volume-reset-all-options operation
+TEST $CLI volume reset $V0
+# Ensure that granular entry heal is also disabled
+EXPECT "no" volume_get_field $V0 cluster.granular-entry-heal
+EXPECT "on" volume_get_field $V0 cluster.entry-self-heal
+
+cleanup
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1399038
diff --git a/tests/basic/afr/granular-esh/conservative-merge.t b/tests/basic/afr/granular-esh/conservative-merge.t
index b566a0ea4d3..b170e47e0cb 100644
--- a/tests/basic/afr/granular-esh/conservative-merge.t
+++ b/tests/basic/afr/granular-esh/conservative-merge.t
@@ -11,13 +11,13 @@ TESTS_EXPECTED_IN_LOOP=4
 TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
 TEST $CLI volume set $V0 self-heal-daemon off
 TEST $CLI volume set $V0 data-self-heal off
 TEST $CLI volume set $V0 metadata-self-heal off
 TEST $CLI volume set $V0 entry-self-heal off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
 
-TEST $CLI volume start $V0
 TEST $GFS --volfile-id=$V0 -s $H0 $M0
 
 TEST mkdir $M0/dir
diff --git a/tests/basic/afr/granular-esh/granular-esh.t b/tests/basic/afr/granular-esh/granular-esh.t
index ee53878e004..de0e8f4290b 100644
--- a/tests/basic/afr/granular-esh/granular-esh.t
+++ b/tests/basic/afr/granular-esh/granular-esh.t
@@ -16,7 +16,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
 TEST $CLI volume set $V0 cluster.metadata-self-heal off
 TEST $CLI volume set $V0 cluster.entry-self-heal off
 TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
 
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
diff --git a/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
index 2da90a98c76..1b5421bf4b6 100644
--- a/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
+++ b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
@@ -17,7 +17,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
 TEST $CLI volume set $V0 cluster.metadata-self-heal off
 TEST $CLI volume set $V0 cluster.entry-self-heal off
 TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
 
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
 
@@ -40,7 +40,7 @@ do
 done
 
 # Now disable granular-entry-heal
-TEST $CLI volume set $V0 granular-entry-heal off
+TEST $CLI volume heal $V0 granular-entry-heal disable
 
 # Start the brick that was down
 TEST $CLI volume start $V0 force
diff --git a/tests/basic/afr/granular-esh/replace-brick.t b/tests/basic/afr/granular-esh/replace-brick.t
index aaa54da2a2c..639ed81b95c 100644
--- a/tests/basic/afr/granular-esh/replace-brick.t
+++ b/tests/basic/afr/granular-esh/replace-brick.t
@@ -12,7 +12,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
 TEST $CLI volume set $V0 cluster.metadata-self-heal off
 TEST $CLI volume set $V0 cluster.entry-self-heal off
 TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
 
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 9c26cb50e9b..4ad23359efd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1112,6 +1112,25 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
                 if (key_fixed)
                         key = key_fixed;
 
+                if (strcmp (key, "cluster.granular-entry-heal") == 0) {
+                        /* For granular entry-heal, if the set command was
+                         * invoked through the volume-set CLI, then allow the
+                         * command only if the volume is still in 'Created'
+                         * state
+                         */
+                        if ((dict_get (dict, "is-special-key") == NULL) &&
+                            (volinfo->status != GLUSTERD_STATUS_NONE)) {
+                                snprintf (errstr, sizeof (errstr), " 'gluster "
+                                          "volume set <VOLNAME> %s {enable, "
+                                          "disable}' is not supported. Use "
+                                          "'gluster volume heal <VOLNAME> "
+                                          "granular-entry-heal {enable, "
+                                          "disable}' instead.", key);
+                                ret = -1;
+                                goto out;
+                        }
+                }
+
                 /* Check if the key is cluster.op-version and set
                  * local_new_op_version to the value given if possible.
                  */
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 72e14b0429d..b0a9372069e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -760,8 +760,9 @@ out:
         return ret;
 }
 
 static int
-glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
-                                     glusterd_volinfo_t *volinfo)
+glusterd_handle_heal_options_enable_disable (rpcsvc_request_t *req,
+                                             dict_t *dict,
+                                             glusterd_volinfo_t *volinfo)
 {
         gf_xl_afr_op_t                  heal_op = GF_SHD_OP_INVALID;
         int                             ret = 0;
@@ -775,30 +776,58 @@ glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
         }
 
         if ((heal_op != GF_SHD_OP_HEAL_ENABLE) &&
-            (heal_op != GF_SHD_OP_HEAL_DISABLE)) {
+            (heal_op != GF_SHD_OP_HEAL_DISABLE) &&
+            (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) &&
+            (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
                 ret = -EINVAL;
                 goto out;
         }
 
-        if (heal_op == GF_SHD_OP_HEAL_ENABLE) {
+        if (((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
+            (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) &&
+            (volinfo->type == GF_CLUSTER_TYPE_DISPERSE)) {
+                ret = -1;
+                goto out;
+        }
+
+        if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
+            (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)) {
                 value = "enable";
-        } else if (heal_op == GF_SHD_OP_HEAL_DISABLE) {
+        } else if ((heal_op == GF_SHD_OP_HEAL_DISABLE) ||
+                   (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
                 value = "disable";
         }
 
        /* Convert this command to a volume-set command based on volume type */
        if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
-                ret = glusterd_handle_shd_option_for_tier (volinfo, value,
-                                                           dict);
-                if (!ret)
-                        goto set_volume;
-                goto out;
+                switch (heal_op) {
+                case GF_SHD_OP_HEAL_ENABLE:
+                case GF_SHD_OP_HEAL_DISABLE:
+                        ret = glusterd_handle_shd_option_for_tier (volinfo,
+                                                                   value, dict);
+                        if (!ret)
+                                goto set_volume;
+                        goto out;
+                        /* For any other heal_op, including granular-entry heal,
+                         * just break out of the block but don't goto out yet.
+                         */
+                default:
+                        break;
+                }
         }
 
-        key = volgen_get_shd_key (volinfo->type);
-        if (!key) {
-                ret = -1;
-                goto out;
+       if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
+           (heal_op == GF_SHD_OP_HEAL_DISABLE)) {
+                key = volgen_get_shd_key (volinfo->type);
+                if (!key) {
+                        ret = -1;
+                        goto out;
+                }
+        } else {
+                key = "cluster.granular-entry-heal";
+                ret = dict_set_int8 (dict, "is-special-key", 1);
+                if (ret)
+                        goto out;
         }
 
         ret = dict_set_str (dict, "key1", key);
@@ -884,7 +913,7 @@ __glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
                 goto out;
         }
 
-        ret = glusterd_handle_heal_enable_disable (req, dict, volinfo);
+        ret = glusterd_handle_heal_options_enable_disable (req, dict, volinfo);
         if (ret == -EINVAL) {
                 ret = 0;
         } else {
@@ -1823,6 +1852,8 @@ glusterd_handle_heal_cmd (xlator_t *this, glusterd_volinfo_t *volinfo,
         case GF_SHD_OP_INVALID:
         case GF_SHD_OP_HEAL_ENABLE: /* This op should be handled in volume-set*/
         case GF_SHD_OP_HEAL_DISABLE:/* This op should be handled in volume-set*/
+        case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE: /* This op should be handled in volume-set */
+        case GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE: /* This op should be handled in volume-set */
         case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:/*glfsheal cmd*/
         case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME:/*glfsheal cmd*/
         case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:/*glfsheal cmd*/
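
As the glfs-heal.c hunks show, glfsheal now propagates internal failures as negative errno values and negates them just before exit, so a refused enable should reach the shell as a non-zero exit status alongside the -EAGAIN/-ENOTCONN hints printed by glfsh_end_op_granular_entry_heal(). A hedged scripting sketch built on that behavior, again with a hypothetical volume name:

    # Retry 'enable' until pending heals drain and all bricks are back up;
    # a non-zero exit status means glfsheal reported EAGAIN or ENOTCONN.
    until gluster volume heal testvol granular-entry-heal enable; do
            echo "granular-entry-heal not enabled yet; retrying in 5s"
            sleep 5
    done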
