diff options
author | Bala Konda Reddy M <bala12352@gmail.com> | 2020-08-17 16:11:38 +0530 |
---|---|---|
committer | Bala Konda Reddy M <bala12352@gmail.com> | 2020-08-18 05:58:43 +0000 |
commit | 81440d1bab4d43785b37d285877b235ddd9ac6b6 (patch) | |
tree | 890ba5879f4188b4ee46ad897c443e1c55a7f903 /tests/functional/authentication/test_fusereject.py | |
parent | 891472d8b77574dbb3346b98bb0948e0f2d12a2c (diff) |
[Testfix] Remove redundant logging - Part 1
Problem:
In most of the testcases due to redundant logging,
the performance of the whole suite completion time
is affected.
Solution:
Currently there are 100+ g.log.info statements in the
authentication suite and half of them are redundant.
Removed the g.log.info statements wherever they are not
required. After the changes, around 50 g.log.info statements
remain; the statements were removed not merely to reduce the
number of lines but to improve the completion time of the
whole suite.
Modified a few line indents as well and added teardown
steps to the files that were missing them.
Note: Will be submitting changes for each component separately
Change-Id: I63973e115dd5dbbc7fc9462978397e7915181265
Signed-off-by: Bala Konda Reddy M <bala12352@gmail.com>
Diffstat (limited to 'tests/functional/authentication/test_fusereject.py')
-rw-r--r-- | tests/functional/authentication/test_fusereject.py | 19 |
1 files changed, 4 insertions, 15 deletions
diff --git a/tests/functional/authentication/test_fusereject.py b/tests/functional/authentication/test_fusereject.py index 19bafdff7..6600f4e49 100644 --- a/tests/functional/authentication/test_fusereject.py +++ b/tests/functional/authentication/test_fusereject.py @@ -28,8 +28,7 @@ from glustolibs.gluster.mount_ops import (mount_volume, is_mounted, from glustolibs.gluster.brick_libs import get_all_bricks, are_bricks_online -@runs_on([['replicated'], - ['glusterfs']]) +@runs_on([['replicated'], ['glusterfs']]) class AuthRejectVol(GlusterBaseClass): """ Create a replicated volume and start the volume and check @@ -40,18 +39,18 @@ class AuthRejectVol(GlusterBaseClass): """ Creating a replicated volume and checking if it is started """ + # Calling GlusterBaseClass Setup + self.get_super_method(self, 'setUp')() + ret = self.setup_volume() if not ret: raise ExecutionError("Failed to setup volume %s" % self.volname) - g.log.info("Volume %s has been setup successfully", self.volname) # Check if volume is started volinfo = get_volume_info(self.mnode, self.volname) if volinfo[self.volname]['statusStr'] != "Started": raise ExecutionError("Volume has not Started") g.log.info("Volume is started") - # Calling GlusterBaseClass Setup - self.get_super_method(self, 'setUp')() def tearDown(self): """ @@ -64,13 +63,10 @@ class AuthRejectVol(GlusterBaseClass): raise ExecutionError("Failed to unmount volume from client" " %s" % client) g.log.info("Unmounted Volume from client %s successfully", client) - g.log.info("Cleaning up volume") ret = self.cleanup_volume() if not ret: raise ExecutionError("Failed to Cleanup the " "Volume %s" % self.volname) - g.log.info("Volume deleted successfully " - ": %s", self.volname) # Calling GlusterBaseClass tearDown self.get_super_method(self, 'tearDown')() @@ -106,7 +102,6 @@ class AuthRejectVol(GlusterBaseClass): # Fetching all the bricks self.mountpoint = "/mnt/testvol" - g.log.info("Fetching bricks for the volume : %s", self.volname) bricks_list = 
get_all_bricks(self.mnode, self.volname) self.assertIsNotNone(bricks_list, "Brick list is empty") g.log.info("Brick List : %s", bricks_list) @@ -114,7 +109,6 @@ class AuthRejectVol(GlusterBaseClass): # Check are bricks online ret = are_bricks_online(self.mnode, self.volname, bricks_list) self.assertTrue(ret, "All bricks are not online") - g.log.info("All bricks are online") # Using this way to check because of bug 1586036 # Mounting volume @@ -144,10 +138,8 @@ class AuthRejectVol(GlusterBaseClass): # Mounting the vol on client2 # Check bricks are online - g.log.info("Brick List : %s", bricks_list) ret = are_bricks_online(self.mnode, self.volname, bricks_list) self.assertTrue(ret, "All bricks are not online") - g.log.info("All bricks are online") # Mounting Volume ret, _, _ = mount_volume(self.volname, self.mount_type, @@ -162,7 +154,6 @@ class AuthRejectVol(GlusterBaseClass): user='root') self.assertTrue(out, "Volume %s has failed to mount" % self.volname) - g.log.info("Volume is mounted successfully %s", self.volname) # Reset Volume ret, _, _ = volume_reset(mnode=self.mnode, volname=self.volname) @@ -170,10 +161,8 @@ class AuthRejectVol(GlusterBaseClass): g.log.info("Volume %s reset operation is successful", self.volname) # Checking if bricks are online - g.log.info("Brick List : %s", bricks_list) ret = are_bricks_online(self.mnode, self.volname, bricks_list) self.assertTrue(ret, "All bricks are not online") - g.log.info("All bricks are online") # Mounting Volume ret, _, _ = mount_volume(self.volname, self.mount_type, |