author     Bala Konda Reddy M <bala12352@gmail.com>   2020-08-17 16:11:38 +0530
committer  Bala Konda Reddy M <bala12352@gmail.com>   2020-08-18 05:58:43 +0000
commit     81440d1bab4d43785b37d285877b235ddd9ac6b6 (patch)
tree       890ba5879f4188b4ee46ad897c443e1c55a7f903 /tests/functional
parent     891472d8b77574dbb3346b98bb0948e0f2d12a2c (diff)
[Testfix] Remove redundant logging - Part 1
Problem: Redundant logging in most of the test cases inflates the completion time of the whole suite.

Solution: The authentication suite currently carries 100+ g.log.info statements, and about half of them are redundant. Removed the g.log.info statements wherever they are not required; roughly 50 remain after the change. The statements were removed not to shrink the line count but to improve the whole suite. Also fixed a few line indents and added the missing tearDown to the files that lacked it.

Note: Changes for the remaining components will be submitted separately.

Change-Id: I63973e115dd5dbbc7fc9462978397e7915181265
Signed-off-by: Bala Konda Reddy M <bala12352@gmail.com>
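The convention this patch applies can be summarized in a short sketch (illustrative only: ExampleAuthTest and its test method are hypothetical, not part of this commit, and the import paths follow the ones used by the files touched below, so they may need adjusting). The base class setUp/tearDown is always invoked, success-only g.log.info calls that merely restate an assertion's outcome are dropped, and failure messages keep the descriptive context.

# Sketch of the post-patch convention; not part of this commit.
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.auth_ops import set_auth_allow


@runs_on([['replicated'], ['glusterfs']])
class ExampleAuthTest(GlusterBaseClass):
    """Hypothetical test illustrating the logging convention."""

    def setUp(self):
        # Call the base class setUp first, then do test-specific setup.
        self.get_super_method(self, 'setUp')()
        if not self.setup_volume():
            # The error message carries the context; no paired
            # "setup successful" log is needed.
            raise ExecutionError("Failed to setup volume %s" % self.volname)

    def test_auth_allow_example(self):
        # The assertion message documents a failure; a matching
        # "successfully set authentication" log would be redundant.
        auth_dict = {'all': [self.mounts[0].client_system]}
        ret = set_auth_allow(self.volname, self.mnode, auth_dict)
        self.assertTrue(ret, "Failed to set authentication")

    def tearDown(self):
        if not self.cleanup_volume():
            raise ExecutionError("Failed to cleanup volume.")
        # Always finish by delegating to the base class tearDown.
        self.get_super_method(self, 'tearDown')()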
Diffstat (limited to 'tests/functional')
-rw-r--r--  tests/functional/authentication/test_auth_allow.py                    | 13
-rw-r--r--  tests/functional/authentication/test_auth_invalid_values.py           | 13
-rw-r--r--  tests/functional/authentication/test_auth_reject_allow.py             | 19
-rw-r--r--  tests/functional/authentication/test_authentication_allow_blank.py    | 15
-rw-r--r--  tests/functional/authentication/test_fusereject.py                    | 19
-rw-r--r--  tests/functional/authentication/test_verify_auth_reject_precedence.py | 17
-rw-r--r--  tests/functional/authentication/test_vol_auth.py                      | 15
7 files changed, 32 insertions, 79 deletions
diff --git a/tests/functional/authentication/test_auth_allow.py b/tests/functional/authentication/test_auth_allow.py
index 194ea189c..1f4b4b067 100644
--- a/tests/functional/authentication/test_auth_allow.py
+++ b/tests/functional/authentication/test_auth_allow.py
@@ -25,8 +25,7 @@ from glustolibs.gluster.auth_ops import set_auth_allow
@runs_on([['replicated', 'distributed', 'distributed-replicated',
- 'dispersed', 'distributed-dispersed'],
- ['glusterfs']])
+ 'dispersed', 'distributed-dispersed'], ['glusterfs']])
class FuseAuthAllow(GlusterBaseClass):
"""
Tests to verify auth.allow feature on fuse mount.
@@ -38,13 +37,10 @@ class FuseAuthAllow(GlusterBaseClass):
"""
cls.get_super_method(cls, 'setUpClass')()
# Create and start volume
- g.log.info("Starting volume setup process %s", cls.volname)
ret = cls.setup_volume()
if not ret:
raise ExecutionError("Failed to setup "
"and start volume %s" % cls.volname)
- g.log.info("Successfully created and started the volume: %s",
- cls.volname)
def authenticated_mount(self, mount_obj):
"""
@@ -147,7 +143,6 @@ class FuseAuthAllow(GlusterBaseClass):
auth_dict = {'all': [self.mounts[0].client_system]}
ret = set_auth_allow(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set authentication")
- g.log.info("Successfully set authentication on volume")
# Mounting volume on client1
self.authenticated_mount(self.mounts[0])
@@ -179,7 +174,6 @@ class FuseAuthAllow(GlusterBaseClass):
auth_dict = {'all': [hostname_client1.strip()]}
ret = set_auth_allow(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set authentication")
- g.log.info("Successfully set authentication on volume")
# Mounting volume on client1
self.authenticated_mount(self.mounts[0])
@@ -204,8 +198,9 @@ class FuseAuthAllow(GlusterBaseClass):
"""
Cleanup volume
"""
- g.log.info("Cleaning up volume")
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed to cleanup volume.")
- g.log.info("Volume cleanup was successful.")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/authentication/test_auth_invalid_values.py b/tests/functional/authentication/test_auth_invalid_values.py
index 7fd514b71..a494c8357 100644
--- a/tests/functional/authentication/test_auth_invalid_values.py
+++ b/tests/functional/authentication/test_auth_invalid_values.py
@@ -27,9 +27,8 @@ from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.gluster.volume_libs import is_volume_exported
-@runs_on([['replicated', 'distributed', 'distributed-replicated',
- 'dispersed', 'distributed-dispersed'],
- ['glusterfs', 'nfs']])
+@runs_on([['replicated', 'distributed', 'distributed-replicated', 'dispersed',
+ 'distributed-dispersed'], ['glusterfs', 'nfs']])
class AuthInvalidValues(GlusterBaseClass):
"""
Tests to verify negative scenario in authentication allow and reject
@@ -42,13 +41,10 @@ class AuthInvalidValues(GlusterBaseClass):
"""
cls.get_super_method(cls, 'setUpClass')()
# Create and start volume
- g.log.info("Starting volume setup process %s", cls.volname)
ret = cls.setup_volume()
if not ret:
raise ExecutionError("Failed to setup "
"and start volume %s" % cls.volname)
- g.log.info("Successfully created and started the volume: %s",
- cls.volname)
def set_invalid_auth(self, auth_opt, values_list):
"""
@@ -157,8 +153,9 @@ class AuthInvalidValues(GlusterBaseClass):
"""
Cleanup volume
"""
- g.log.info("Cleaning up volume")
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed to cleanup volume.")
- g.log.info("Volume cleanup was successful.")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/authentication/test_auth_reject_allow.py b/tests/functional/authentication/test_auth_reject_allow.py
index 083b08e1b..6afdc5ae8 100644
--- a/tests/functional/authentication/test_auth_reject_allow.py
+++ b/tests/functional/authentication/test_auth_reject_allow.py
@@ -28,8 +28,7 @@ from glustolibs.gluster.auth_ops import set_auth_allow, set_auth_reject
@runs_on([['replicated', 'distributed', 'distributed-replicated',
- 'dispersed', 'distributed-dispersed'],
- ['glusterfs']])
+ 'dispersed', 'distributed-dispersed'], ['glusterfs']])
class FuseAuthRejectAllow(GlusterBaseClass):
"""
Tests to verify auth.reject and auth.allow volume options in volume and
@@ -42,13 +41,10 @@ class FuseAuthRejectAllow(GlusterBaseClass):
"""
cls.get_super_method(cls, 'setUpClass')()
# Create and start volume
- g.log.info("Starting volume setup process %s", cls.volname)
ret = cls.setup_volume()
if not ret:
raise ExecutionError("Failed to setup "
"and start volume %s" % cls.volname)
- g.log.info("Successfully created and started the volume: %s",
- cls.volname)
def authenticated_mount(self, mount_obj):
"""
@@ -167,13 +163,11 @@ class FuseAuthRejectAllow(GlusterBaseClass):
auth_dict = {'all': [self.mounts[0].client_system]}
ret = set_auth_reject(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.reject volume option.")
- g.log.info("Successfully set auth.reject option on volume")
# Setting auth.allow on volume for client2 using ip
auth_dict = {'all': [self.mounts[1].client_system]}
ret = set_auth_allow(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.allow volume option")
- g.log.info("Successfully set auth.allow option on volume")
# Trying to mount volume on client1
self.unauthenticated_mount(self.mounts[0])
@@ -213,13 +207,11 @@ class FuseAuthRejectAllow(GlusterBaseClass):
auth_dict = {'all': [hostname_client1.strip()]}
ret = set_auth_reject(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.reject volume option.")
- g.log.info("Successfully set auth.reject option on volume")
# Setting auth.allow on volume for client2 using hostname
auth_dict = {'all': [hostname_client2.strip()]}
ret = set_auth_allow(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.allow volume option")
- g.log.info("Successfully set auth.allow option on volume")
# Trying to mount volume on client1
self.unauthenticated_mount(self.mounts[0])
@@ -251,13 +243,11 @@ class FuseAuthRejectAllow(GlusterBaseClass):
auth_dict = {'/d1': [self.mounts[0].client_system]}
ret = set_auth_reject(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.reject volume option.")
- g.log.info("Successfully set auth.reject option.")
# Setting auth.allow on d1 for client2 using ip
auth_dict = {'/d1': [self.mounts[1].client_system]}
ret = set_auth_allow(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.allow volume option")
- g.log.info("Successfully set auth.allow option.")
# Creating mount object for sub-directory mount on client1
mount_obj_client1 = copy.deepcopy(self.mounts[0])
@@ -291,13 +281,11 @@ class FuseAuthRejectAllow(GlusterBaseClass):
auth_dict = {'/d1': [hostname_client1.strip()]}
ret = set_auth_reject(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.reject volume option.")
- g.log.info("Successfully set auth.reject option.")
# Setting auth.allow on d1 for client2 using hostname
auth_dict = {'/d1': [hostname_client2.strip()]}
ret = set_auth_allow(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.allow volume option")
- g.log.info("Successfully set auth.allow option.")
# Trying to mount d1 on client1
self.unauthenticated_mount(mount_obj_client1)
@@ -322,8 +310,9 @@ class FuseAuthRejectAllow(GlusterBaseClass):
"""
Cleanup volume
"""
- g.log.info("Cleaning up volume")
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed to cleanup volume.")
- g.log.info("Volume cleanup was successful.")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/authentication/test_authentication_allow_blank.py b/tests/functional/authentication/test_authentication_allow_blank.py
index dab0baab3..4bef00f31 100644
--- a/tests/functional/authentication/test_authentication_allow_blank.py
+++ b/tests/functional/authentication/test_authentication_allow_blank.py
@@ -28,8 +28,7 @@ from glustolibs.gluster.volume_libs import cleanup_volume
@runs_on([['replicated', 'distributed-replicated', 'dispersed',
- 'distributed-dispersed'],
- ['glusterfs']])
+ 'distributed-dispersed'], ['glusterfs']])
class AuthAllowEmptyString(GlusterBaseClass):
"""
Tests to verify auth.allow functionality on Volume and Fuse subdir
@@ -38,13 +37,12 @@ class AuthAllowEmptyString(GlusterBaseClass):
"""
Setup Volume
"""
+ # Calling GlusterBaseClass Setup
+ self.get_super_method(self, 'setUp')()
+
ret = self.setup_volume()
if not ret:
raise ExecutionError("Failed to setup volume")
- g.log.info("Volume %s has been setup successfully", self.volname)
-
- # Calling GlusterBaseClass Setup
- self.get_super_method(self, 'setUp')()
def test_validate_authallow(self):
"""
@@ -76,5 +74,6 @@ class AuthAllowEmptyString(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to Cleanup the "
"Volume %s" % self.volname)
- g.log.info("Volume deleted successfully "
- ": %s", self.volname)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/authentication/test_fusereject.py b/tests/functional/authentication/test_fusereject.py
index 19bafdff7..6600f4e49 100644
--- a/tests/functional/authentication/test_fusereject.py
+++ b/tests/functional/authentication/test_fusereject.py
@@ -28,8 +28,7 @@ from glustolibs.gluster.mount_ops import (mount_volume, is_mounted,
from glustolibs.gluster.brick_libs import get_all_bricks, are_bricks_online
-@runs_on([['replicated'],
- ['glusterfs']])
+@runs_on([['replicated'], ['glusterfs']])
class AuthRejectVol(GlusterBaseClass):
"""
Create a replicated volume and start the volume and check
@@ -40,18 +39,18 @@ class AuthRejectVol(GlusterBaseClass):
"""
Creating a replicated volume and checking if it is started
"""
+ # Calling GlusterBaseClass Setup
+ self.get_super_method(self, 'setUp')()
+
ret = self.setup_volume()
if not ret:
raise ExecutionError("Failed to setup volume %s" % self.volname)
- g.log.info("Volume %s has been setup successfully", self.volname)
# Check if volume is started
volinfo = get_volume_info(self.mnode, self.volname)
if volinfo[self.volname]['statusStr'] != "Started":
raise ExecutionError("Volume has not Started")
g.log.info("Volume is started")
- # Calling GlusterBaseClass Setup
- self.get_super_method(self, 'setUp')()
def tearDown(self):
"""
@@ -64,13 +63,10 @@ class AuthRejectVol(GlusterBaseClass):
raise ExecutionError("Failed to unmount volume from client"
" %s" % client)
g.log.info("Unmounted Volume from client %s successfully", client)
- g.log.info("Cleaning up volume")
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed to Cleanup the "
"Volume %s" % self.volname)
- g.log.info("Volume deleted successfully "
- ": %s", self.volname)
# Calling GlusterBaseClass tearDown
self.get_super_method(self, 'tearDown')()
@@ -106,7 +102,6 @@ class AuthRejectVol(GlusterBaseClass):
# Fetching all the bricks
self.mountpoint = "/mnt/testvol"
- g.log.info("Fetching bricks for the volume : %s", self.volname)
bricks_list = get_all_bricks(self.mnode, self.volname)
self.assertIsNotNone(bricks_list, "Brick list is empty")
g.log.info("Brick List : %s", bricks_list)
@@ -114,7 +109,6 @@ class AuthRejectVol(GlusterBaseClass):
# Check are bricks online
ret = are_bricks_online(self.mnode, self.volname, bricks_list)
self.assertTrue(ret, "All bricks are not online")
- g.log.info("All bricks are online")
# Using this way to check because of bug 1586036
# Mounting volume
@@ -144,10 +138,8 @@ class AuthRejectVol(GlusterBaseClass):
# Mounting the vol on client2
# Check bricks are online
- g.log.info("Brick List : %s", bricks_list)
ret = are_bricks_online(self.mnode, self.volname, bricks_list)
self.assertTrue(ret, "All bricks are not online")
- g.log.info("All bricks are online")
# Mounting Volume
ret, _, _ = mount_volume(self.volname, self.mount_type,
@@ -162,7 +154,6 @@ class AuthRejectVol(GlusterBaseClass):
user='root')
self.assertTrue(out, "Volume %s has failed to mount"
% self.volname)
- g.log.info("Volume is mounted successfully %s", self.volname)
# Reset Volume
ret, _, _ = volume_reset(mnode=self.mnode, volname=self.volname)
@@ -170,10 +161,8 @@ class AuthRejectVol(GlusterBaseClass):
g.log.info("Volume %s reset operation is successful", self.volname)
# Checking if bricks are online
- g.log.info("Brick List : %s", bricks_list)
ret = are_bricks_online(self.mnode, self.volname, bricks_list)
self.assertTrue(ret, "All bricks are not online")
- g.log.info("All bricks are online")
# Mounting Volume
ret, _, _ = mount_volume(self.volname, self.mount_type,
diff --git a/tests/functional/authentication/test_verify_auth_reject_precedence.py b/tests/functional/authentication/test_verify_auth_reject_precedence.py
index d51e61443..ce8420690 100644
--- a/tests/functional/authentication/test_verify_auth_reject_precedence.py
+++ b/tests/functional/authentication/test_verify_auth_reject_precedence.py
@@ -28,8 +28,7 @@ from glustolibs.gluster.auth_ops import set_auth_allow, set_auth_reject
@runs_on([['replicated', 'distributed', 'distributed-replicated',
- 'dispersed', 'distributed-dispersed'],
- ['glusterfs']])
+ 'dispersed', 'distributed-dispersed'], ['glusterfs']])
class VerifyAuthRejectPrecedence(GlusterBaseClass):
"""
Tests to verify auth.reject precedence over auth.allow option.
@@ -41,13 +40,10 @@ class VerifyAuthRejectPrecedence(GlusterBaseClass):
"""
cls.get_super_method(cls, 'setUpClass')()
# Create and start volume
- g.log.info("Starting volume setup process %s", cls.volname)
ret = cls.setup_volume()
if not ret:
raise ExecutionError("Failed to setup "
"and start volume %s" % cls.volname)
- g.log.info("Successfully created and started the volume: %s",
- cls.volname)
def authenticated_mount(self, mount_obj):
"""
@@ -182,14 +178,12 @@ class VerifyAuthRejectPrecedence(GlusterBaseClass):
auth_dict = {'all': ['*']}
ret = set_auth_reject(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.reject volume option.")
- g.log.info("Successfully set auth.reject option on volume")
# Setting auth.allow on volume for client1 and client2 using ip
auth_dict = {'all': [self.mounts[0].client_system,
self.mounts[1].client_system]}
ret = set_auth_allow(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.allow volume option")
- g.log.info("Successfully set auth.allow option on volume")
# Trying to mount volume on client1
self.unauthenticated_mount(self.mounts[0])
@@ -230,7 +224,6 @@ class VerifyAuthRejectPrecedence(GlusterBaseClass):
auth_dict = {'all': [hostname_client1, hostname_client2]}
ret = set_auth_allow(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.allow volume option")
- g.log.info("Successfully set auth.allow option on volume")
# Trying to mount volume on client1
self.unauthenticated_mount(self.mounts[0])
@@ -255,14 +248,12 @@ class VerifyAuthRejectPrecedence(GlusterBaseClass):
auth_dict = {'/d1': ['*']}
ret = set_auth_reject(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.reject volume option.")
- g.log.info("Successfully set auth.reject option.")
# Setting auth.allow on d1 for client1 and client2 using ip
auth_dict = {'/d1': [self.mounts[0].client_system,
self.mounts[1].client_system]}
ret = set_auth_allow(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.allow volume option")
- g.log.info("Successfully set auth.allow option.")
# Creating mount object for sub-directory mount on client1
mount_obj_client1 = copy.deepcopy(self.mounts[0])
@@ -296,7 +287,6 @@ class VerifyAuthRejectPrecedence(GlusterBaseClass):
auth_dict = {'/d1': [hostname_client1, hostname_client2]}
ret = set_auth_allow(self.volname, self.mnode, auth_dict)
self.assertTrue(ret, "Failed to set auth.allow volume option")
- g.log.info("Successfully set auth.allow option.")
# Trying to mount d1 on client1
self.unauthenticated_mount(mount_obj_client1)
@@ -320,8 +310,9 @@ class VerifyAuthRejectPrecedence(GlusterBaseClass):
"""
Cleanup volume
"""
- g.log.info("Cleaning up volume")
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed to cleanup volume.")
- g.log.info("Volume cleanup was successful.")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/authentication/test_vol_auth.py b/tests/functional/authentication/test_vol_auth.py
index 646ab3520..fa5e34a2f 100644
--- a/tests/functional/authentication/test_vol_auth.py
+++ b/tests/functional/authentication/test_vol_auth.py
@@ -30,27 +30,26 @@ from glustolibs.gluster.brick_libs import get_all_bricks, are_bricks_online
from glustolibs.gluster.volume_libs import cleanup_volume
-@runs_on([['replicated'],
- ['glusterfs']])
+@runs_on([['replicated'], ['glusterfs']])
class AuthRejectVol(GlusterBaseClass):
"""
Create a replicated volume and start the volume and check
if volume is started
"""
def setUp(self):
+ # Calling GlusterBaseClass Setup
+ self.get_super_method(self, 'setUp')()
+
# Setup Volume to create a replicated volume
ret = self.setup_volume()
if not ret:
raise ExecutionError("Failed to setup volume %s" % self.volname)
- g.log.info("Volume %s has been setup successfully", self.volname)
# Check if volume is started
volinfo = get_volume_info(self.mnode, self.volname)
if volinfo[self.volname]['statusStr'] != "Started":
raise ExecutionError("Volume has not Started")
g.log.info("Volume is started.")
- # Calling GlusterBaseClass Setup
- self.get_super_method(self, 'setUp')()
def tearDown(self):
# tearDown for every test
@@ -59,8 +58,6 @@ class AuthRejectVol(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to Cleanup the "
"Volume %s" % self.volname)
- g.log.info("Volume deleted successfully "
- ": %s", self.volname)
# Calling GlusterBaseClass tearDown
self.get_super_method(self, 'tearDown')()
@@ -90,7 +87,6 @@ class AuthRejectVol(GlusterBaseClass):
for client in self.clients:
# Fetching all the bricks
self.mountpoint = '/mnt/testvol'
- g.log.info("Fetching bricks for the volume : %s", self.volname)
bricks_list = get_all_bricks(self.mnode, self.volname)
self.assertIsNotNone(bricks_list, "Brick list is empty")
g.log.info("Brick List : %s", bricks_list)
@@ -98,7 +94,6 @@ class AuthRejectVol(GlusterBaseClass):
# Check are bricks online
ret = are_bricks_online(self.mnode, self.volname, bricks_list)
self.assertTrue(ret, "All bricks are not online")
- g.log.info("All bricks are online")
# Creating directory to mount
cmd = ("mkdir -p /mnt/testvol")
@@ -138,7 +133,6 @@ class AuthRejectVol(GlusterBaseClass):
# Check if bricks are online and Mounting the vol on client1
# Fetching bricks
- g.log.info("Fetching bricks for the volume : %s", self.volname)
bricks_list = get_all_bricks(self.mnode, self.volname)
self.assertIsNotNone(bricks_list, "Brick list is empty")
g.log.info("Brick List : %s", bricks_list)
@@ -146,7 +140,6 @@ class AuthRejectVol(GlusterBaseClass):
# Checking if bricks are online
ret = are_bricks_online(self.mnode, self.volname, bricks_list)
self.assertTrue(ret, "All bricks are not online")
- g.log.info("All bricks are online")
# Creating directory to mount
cmd = ("mkdir -p /mnt/testvol")