summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorhadarsharon <hsharon@redhat.com>2019-05-30 12:10:40 +0300
committerBala Konda Reddy M <bmekala@redhat.com>2019-06-09 10:46:58 +0000
commitf4a81a466d0bcf9b4be6cb5890d3f0ef7e41004f (patch)
treebf0226d3aca4bbe2bf96c778e03d1040c2250288
parent9356f6131d76bb72dfe7ab5d7e243cbbdbc16e6f (diff)
Optimized test case: tests/functional/glusterd/test_add_brick
Worked on the following: 1. Removed redundant throwaway variables (ret, _, _). 2. Made exceptions more consistent. 3. Added comments within the code. 4. Clarified error messages raised on assertion failures. Change-Id: I8ca0acce848bd9a8a5d217b5a4e247590177154d Signed-off-by: hadarsharon <hsharon@redhat.com>
-rw-r--r--tests/functional/glusterd/test_add_brick.py119
1 file changed, 54 insertions, 65 deletions
diff --git a/tests/functional/glusterd/test_add_brick.py b/tests/functional/glusterd/test_add_brick.py
index aa3b6ae..983b13f 100644
--- a/tests/functional/glusterd/test_add_brick.py
+++ b/tests/functional/glusterd/test_add_brick.py
@@ -35,8 +35,7 @@ class TestVolumeCreate(GlusterBaseClass):
GlusterBaseClass.setUpClass.im_func(cls)
# check whether peers are in connected state
- ret = cls.validate_peers_are_connected()
- if not ret:
+ if not cls.validate_peers_are_connected():
raise ExecutionError("Peers are not in connected state")
def tearDown(self):
@@ -47,8 +46,7 @@ class TestVolumeCreate(GlusterBaseClass):
raise ExecutionError("Failed to get the volume list")
for volume in vol_list:
- ret = cleanup_volume(self.mnode, volume)
- if not ret:
+ if not cleanup_volume(self.mnode, volume):
raise ExecutionError("Unable to delete volume % s" % volume)
g.log.info("Volume deleted successfully : %s", volume)
@@ -56,79 +54,70 @@ class TestVolumeCreate(GlusterBaseClass):
def test_add_brick_functionality(self):
- ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
- self.assertTrue(ret, "Failed to create and start volume %s"
- % self.volname)
+ # create and start volume
+ self.assertTrue(
+ setup_volume(self.mnode, self.all_servers_info, self.volume),
+ "Failed to create and start volume %s" % self.volname)
g.log.info("Volume created and started successfully")
# form bricks list to test add brick functionality
-
replica_count_of_volume = self.volume['voltype']['replica_count']
num_of_bricks = 4 * replica_count_of_volume
- bricks_list = form_bricks_list(self.mnode, self.volname,
- num_of_bricks, self.servers,
- self.all_servers_info)
+ bricks_list = form_bricks_list(self.mnode, self.volname, num_of_bricks,
+ self.servers, self.all_servers_info)
self.assertIsNotNone(bricks_list, "Bricks list is None")
# Try to add a single brick to volume, which should fail as it is a
# replicated volume, we should pass multiple of replica count number
# of bricks
-
- bricks_list_to_add = [bricks_list[0]]
- ret, _, _ = add_brick(self.mnode, self.volname, bricks_list_to_add)
- self.assertNotEqual(ret, 0, "Expected: It should fail to add a single"
- "brick to a replicated volume. Actual: "
- "Successfully added single brick to volume")
- g.log.info("failed to add a single brick to replicated volume")
-
- # add brick replica count number of bricks in which one is
- # non existing brick
- kwargs = {}
- kwargs['replica_count'] = replica_count_of_volume
-
- bricks_list_to_add = bricks_list[1:replica_count_of_volume + 1]
-
- num_of_bricks = len(bricks_list_to_add)
- index_of_non_existing_brick = random.randint(0, num_of_bricks - 1)
- complete_brick = bricks_list_to_add[index_of_non_existing_brick]
- non_existing_brick = complete_brick + "/non_existing_brick"
- bricks_list_to_add[index_of_non_existing_brick] = non_existing_brick
-
- ret, _, _ = add_brick(self.mnode, self.volname,
- bricks_list_to_add, False, **kwargs)
- self.assertNotEqual(ret, 0, "Expected: It should fail to add non"
- "existing brick to a volume. Actual: "
- "Successfully added non existing brick to volume")
- g.log.info("failed to add a non existing brick to volume")
-
- # adding brick from node which is not part of cluster
- bricks_list_to_add = bricks_list[replica_count_of_volume + 1:
- (2 * replica_count_of_volume) + 1]
-
- num_of_bricks = len(bricks_list_to_add)
- index_of_node = random.randint(0, num_of_bricks - 1)
- complete_brick = bricks_list_to_add[index_of_node].split(":")
- complete_brick[0] = "abc.def.ghi.jkl"
- bricks_list_to_add[index_of_node] = ":".join(complete_brick)
- ret, _, _ = add_brick(self.mnode, self.volname,
- bricks_list_to_add, False, **kwargs)
- self.assertNotEqual(ret, 0, "Expected: It should fail to add brick "
- "from a node which is not part of a cluster."
- "Actual:Successfully added bricks from node which"
- " is not a part of cluster to volume")
-
- g.log.info("Failed to add bricks form node which is not a part of "
- "cluster to volume")
+ self.assertNotEqual(
+ add_brick(self.mnode, self.volname, bricks_list[0])[0], 0,
+ "Expected: It should fail to add a single brick to a replicated "
+ "volume. Actual: Successfully added single brick to volume")
+ g.log.info("Failed to add a single brick to replicated volume "
+ "(as expected)")
+
+ # add brick replica count number of bricks in which one is a
+ # non existing brick (not using the brick used in the earlier test)
+ kwargs = {'replica_count': replica_count_of_volume}
+ bricks_to_add = bricks_list[1:replica_count_of_volume + 1]
+ # make one of the bricks a non-existing one (randomly)
+ random_index = random.randint(0, replica_count_of_volume - 1)
+ bricks_to_add[random_index] += "/non_existing_brick"
+
+ self.assertNotEqual(
+ add_brick(self.mnode, self.volname, bricks_to_add, **kwargs)[0], 0,
+ "Expected: It should fail to add a non existing brick to volume. "
+ "Actual: Successfully added a non existing brick to volume")
+ g.log.info("Failed to add a non existing brick to volume "
+ "(as expected)")
+
+ # add a brick from a node which is not a part of the cluster
+ # (not using bricks used in earlier tests)
+ bricks_to_add = bricks_list[replica_count_of_volume + 1:
+ (2 * replica_count_of_volume) + 1]
+ # change one (random) brick's node name to a non existent node
+ random_index = random.randint(0, replica_count_of_volume - 1)
+ brick_to_change = bricks_to_add[random_index].split(":")
+ brick_to_change[0] = "abc.def.ghi.jkl"
+ bricks_to_add[random_index] = ":".join(brick_to_change)
+ self.assertNotEqual(
+ add_brick(self.mnode, self.volname, bricks_to_add, **kwargs)[0], 0,
+ "Expected: It should fail to add brick from a node which is not "
+ "part of a cluster. Actual: Successfully added bricks from node "
+ "which is not a part of cluster to volume")
+ g.log.info("Failed to add bricks from node which is not a part of "
+ "cluster to volume (as expected)")
# add correct number of valid bricks, it should succeed
-
- bricks_list_to_add = bricks_list[(2 * replica_count_of_volume) + 1:
- (3 * replica_count_of_volume) + 1]
- ret, _, _ = add_brick(self.mnode, self.volname,
- bricks_list_to_add, False, **kwargs)
- self.assertEqual(ret, 0, "Failed to add the bricks to the volume")
+ # (not using bricks used in earlier tests)
+ bricks_to_add = bricks_list[(2 * replica_count_of_volume) + 1:
+ (3 * replica_count_of_volume) + 1]
+ self.assertEqual(
+ add_brick(self.mnode, self.volname, bricks_to_add, **kwargs)[0], 0,
+ "Failed to add the bricks to the volume")
g.log.info("Successfully added bricks to volume")
# Perform rebalance start operation
- ret, _, _ = rebalance_start(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Rebalance start is success")
+ self.assertEqual(rebalance_start(self.mnode, self.volname)[0], 0,
+ "Rebalance start failed")