#  Copyright (C) 2021 Red Hat, Inc.
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License along
#  with this program; if not, write to the Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

"""
Description:
    Test cases in this module tests the authentication allow feature
"""
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import (GlusterBaseClass,
                                                  runs_on)
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.auth_ops import set_auth_allow
from glustolibs.gluster.brick_libs import (get_all_bricks,
                                           bring_bricks_offline,
                                           bring_bricks_online,
                                           are_bricks_offline)
from glustolibs.gluster.heal_ops import trigger_heal
from glustolibs.gluster.heal_libs import (monitor_heal_completion,
                                          is_heal_complete)


@runs_on([['distributed-replicated', 'distributed-dispersed'],
          ['glusterfs']])
class FuseAuthAllow(GlusterBaseClass):
    """
    Tests to verify auth.allow feature on fuse mount.
    """
    @classmethod
    def setUpClass(cls):
        """
        Create and start volume
        """
        cls.get_super_method(cls, 'setUpClass')()

        # Create and start volume
        ret = cls.setup_volume()
        if not ret:
            raise ExecutionError("Failed to setup "
                                 "and start volume %s" % cls.volname)

    def _authenticated_mount(self, mount_obj):
        """
        Mount volume on authenticated client

        Args:
            mount_obj(obj): Object of GlusterMount class
        """
        # Mount volume
        ret = mount_obj.mount()
        self.assertTrue(ret, ("Failed to mount %s on client %s" %
                              (mount_obj.volname,
                               mount_obj.client_system)))
        g.log.info("Successfully mounted %s on client %s",
                   mount_obj.volname, mount_obj.client_system)

        # Verify mount
        ret = mount_obj.is_mounted()
        self.assertTrue(ret, ("%s is not mounted on client %s"
                              % (mount_obj.volname,
                                 mount_obj.client_system)))
        g.log.info("Verified: %s is mounted on client %s",
                   mount_obj.volname, mount_obj.client_system)

    def _brick_down_heal(self):
        """
        Create data on the mount, take one brick offline and back online,
        then trigger a heal and wait for it to complete.
        """
        # Create files on mount point using dd command
        cmd = ('cd %s;for i in {1..10};'
               'do dd if=/dev/urandom bs=1024 count=1 of=file$i;done;'
               % (self.mounts[0].mountpoint))
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create files on mountpoint")
        g.log.info("Successfully created files on mountpoint")

        # Bring brick1 offline
        # NOTE(review): bricks_list[1] is the *second* brick of the volume;
        # the "brick1" wording assumes zero-based numbering.
        bricks_list = get_all_bricks(self.mnode, self.volname)
        ret = bring_bricks_offline(self.volname, bricks_list[1])
        self.assertTrue(ret, 'Failed to bring brick1 offline')

        ret = are_bricks_offline(self.mnode, self.volname,
                                 [bricks_list[1]])
        self.assertTrue(ret, 'Brick1 is not offline')
        g.log.info('Bringing brick1 offline is successful')

        # Bring brick1 back online
        ret = bring_bricks_online(self.mnode, self.volname,
                                  [bricks_list[1]])
        self.assertTrue(ret, 'Failed to bring brick1 online')
        g.log.info('Bringing brick1 online is successful')

        # Start healing
        ret = trigger_heal(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal is not started')
        g.log.info('Healing is started')

        # Monitor heal completion
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal has not yet completed')

        # Check if heal is completed
        ret = is_heal_complete(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal is not complete')
        g.log.info('Heal is completed successfully')

    def test_auth_allow_with_heal(self):
        """
        Validating the FUSE authentication volume options with Heal.
        Steps:
        1. Setup and start volume
        2. Set auth.allow on volume for client1 using ip of client1
        3. Mount volume on client1.
        4. Create files on mount point using dd command
        5. Bring down one brick of the volume
        6. Bring the brick back up after few seconds using
           "gluster volume start force"
        7. Start volume heal by using gluster volume heal
        8. See the heal status using gluster volume heal info
        9. Set auth.allow on volume for client1 using hostname of client1.
        10. Repeat steps from 3 to 9
        """
        # Setting authentication on volume for client1 using ip
        auth_dict = {'all': [self.mounts[0].client_system]}
        ret = set_auth_allow(self.volname, self.mnode, auth_dict)
        self.assertTrue(ret, "Failed to set authentication")

        # Mounting volume on client1
        self._authenticated_mount(self.mounts[0])

        # Create files,bring brick down and check heal
        self._brick_down_heal()

        # Unmount volume from client1
        ret = self.mounts[0].unmount()
        self.assertTrue(ret, ("Failed to unmount volume %s from client %s"
                              % (self.volname, self.mounts[0].client_system)))

        # Obtain hostname of client1
        ret, hostname_client1, _ = g.run(self.mounts[0].client_system,
                                         "hostname")
        self.assertEqual(ret, 0, ("Failed to obtain hostname of client %s"
                                  % self.mounts[0].client_system))
        g.log.info("Obtained hostname of client. IP- %s, hostname- %s",
                   self.mounts[0].client_system, hostname_client1.strip())

        # Setting authentication on volume for client1 using hostname
        auth_dict = {'all': [hostname_client1.strip()]}
        ret = set_auth_allow(self.volname, self.mnode, auth_dict)
        self.assertTrue(ret, "Failed to set authentication")

        # Mounting volume on client1
        self._authenticated_mount(self.mounts[0])

        # Create files,bring brick down and check heal
        self._brick_down_heal()

    def tearDown(self):
        """
        Cleanup volume
        """
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to cleanup volume.")

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()