diff options
 tests/bugs/distribute/bug-860663.t             | 18 ++++++++++--------
 xlators/mgmt/glusterd/src/glusterd-rebalance.c | 37 +++++++++++++++++++++++++
 2 files changed, 48 insertions(+), 7 deletions(-)
diff --git a/tests/bugs/distribute/bug-860663.t b/tests/bugs/distribute/bug-860663.t index f250a736e41..cc2b505080b 100644 --- a/tests/bugs/distribute/bug-860663.t +++ b/tests/bugs/distribute/bug-860663.t @@ -1,5 +1,6 @@  #!/bin/bash +. $(dirname $0)/../../volume.rc  . $(dirname $0)/../../include.rc  cleanup; @@ -28,23 +29,26 @@ TEST $CLI volume start $V0  ## Mount FUSE  TEST glusterfs -s $H0 --volfile-id $V0 $M0; -TEST $(dirname $0)/bug-860663 $M0/files 10000 +TEST $(dirname $0)/bug-860663 $M0/files 1000  ORIG_FILE_COUNT=`ls -l $M0 | wc -l`; -TEST [ $ORIG_FILE_COUNT -ge 10000 ] +TEST [ $ORIG_FILE_COUNT -ge 1000 ]  # Kill a brick process  kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`; -TEST $CLI volume rebalance $V0 fix-layout start +TEST ! $CLI volume rebalance $V0 fix-layout start -sleep 30; +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" brick_up_status $V0 $H0 $B0/${V0}1 -TEST ! $(dirname $0)/bug-860663 $M0/files 10000 +TEST $CLI volume rebalance $V0 fix-layout start -TEST $CLI volume start $V0 force +EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0; -sleep 5; +# Unmount and remount to make sure we're doing fresh lookups. 
+TEST umount $M0 +TEST glusterfs -s $H0 --volfile-id $V0 $M0;  NEW_FILE_COUNT=`ls -l $M0 | wc -l`; diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c index 7112e599467..43c3472fe5a 100644 --- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c +++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c @@ -637,6 +637,8 @@ glusterd_op_stage_rebalance (dict_t *dict, char **op_errstr)          int32_t                 cmd          = 0;          char                    msg[2048]    = {0};          glusterd_volinfo_t      *volinfo     = NULL; +        glusterd_brickinfo_t    *brickinfo   = NULL; +        glusterd_peerinfo_t     *peerinfo    = NULL;          char                    *task_id_str = NULL;          dict_t                  *op_ctx      = NULL;          xlator_t                *this        = 0; @@ -704,6 +706,41 @@ glusterd_op_stage_rebalance (dict_t *dict, char **op_errstr)                          goto out;                  } +                cds_list_for_each_entry (brickinfo, &volinfo->bricks, +                                         brick_list) { +                        if (glusterd_is_local_brick (THIS, volinfo, brickinfo)) { +                                if (brickinfo->status != GF_BRICK_STARTED) { +                                        gf_asprintf (op_errstr, "Received" +                                                     " rebalance on volume with " +                                                     " stopped brick %s", +                                                     brickinfo->path); +                                        ret = -1; +                                        goto out; +                                } +                        } else { +                                rcu_read_lock (); +                                peerinfo = glusterd_peerinfo_find_by_uuid +                                           (brickinfo->uuid); +                                if (!peerinfo) { + 
                                       gf_asprintf (op_errstr, "Host node of " +                                                     "brick %s doesn't belong " +                                                     "to cluster", +                                                     brickinfo->path); +                                        ret = -1; +                                        rcu_read_unlock (); +                                        goto out; +                                } else if (!peerinfo->connected) { +                                        gf_asprintf (op_errstr, "Host node of " +                                                     "brick %s is down", +                                                     brickinfo->path); +                                        ret = -1; +                                        rcu_read_unlock (); +                                        goto out; +                                } +                                rcu_read_unlock (); +                        } +                } +          case GF_DEFRAG_CMD_START_FORCE:                  if (is_origin_glusterd (dict)) {                          op_ctx = glusterd_op_get_ctx ();  | 
