diff options
Diffstat (limited to 'tests')
6 files changed, 782 insertions, 2 deletions
diff --git a/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/class_setup_nfs_ganesha_vol.py b/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/class_setup_nfs_ganesha_vol.py new file mode 100644 index 00000000000..b9599f40174 --- /dev/null +++ b/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/class_setup_nfs_ganesha_vol.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python +#  This file is part of DiSTAF +#  Copyright (C) 2015-2016  Red Hat, Inc. <http://www.redhat.com> +# +#  This program is free software; you can redistribute it and/or modify +#  it under the terms of the GNU General Public License as published by +#  the Free Software Foundation; either version 2 of the License, or +#  any later version. +# +#  This program is distributed in the hope that it will be useful, +#  but WITHOUT ANY WARRANTY; without even the implied warranty of +#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +#  GNU General Public License for more details. +# +#  You should have received a copy of the GNU General Public License along +#  with this program; if not, write to the Free Software Foundation, Inc., +#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +import time +from distaflibs.gluster.mount_ops import mount_volume, umount_volume +from distaflibs.gluster.volume_ops import (setup_vol, get_volume_info, +                                           get_volume_status) +from distaf.util import testcase, tc +from distaflibs.gluster.ganesha import (setup_nfs_ganesha, +                                        teardown_nfs_ganesha_setup) +from distaflibs.gluster.gluster_base_class import GlusterBaseClass + + +class SetupNfsGaneshaVol(GlusterBaseClass): +    """ +        This is the base class for the ganesha-gluster tests +        It is a subclass of GlusterBaseClass. 
All ganesha-gluster +        tests can subclass this and then write test cases +    """ + +    def __init__(self, config_data, nfs_options="vers=3"): +        """ +            Initialise the class with the config values +            Kwargs: +                nfs_options (str): This argument takes the nfs options, +                    say vers=3 or vers=4. +                    Default value is vers=3. +        """ +        GlusterBaseClass.__init__(self, config_data) +        self.options = nfs_options +        self.no_of_ganesha_nodes = (config_data["gluster"] +                                    ["cluster_config"]["nfs_ganesha"] +                                    ["num_of_nfs_ganesha_nodes"]) +        self.no_of_ganesha_nodes = int(self.no_of_ganesha_nodes) +        self.vips = (config_data["gluster"]["cluster_config"] +                     ["nfs_ganesha"]["vips"]) + +    def setup(self): +        """ +            Function to setup ganesha and create volume for testing. +        """ +        ret = setup_nfs_ganesha(self.no_of_ganesha_nodes) +        if ret: +            tc.logger.info("setup of ganesha for %s node is successfull" +                           % self.no_of_ganesha_nodes) +        else: +            tc.logger.error("setup of ganesha for %s node is unsuccessfull" +                            % self.no_of_ganesha_nodes) +            return False +        ret = GlusterBaseClass.setup(self) +        if not ret: +            return False +        time.sleep(10) +        ret = get_volume_status(self.volname) +        if ret is None: +            return False +        ret = get_volume_info(self.volname) +        if ret is None: +            return False +        ret, out, err = tc.run(self.mnode, "showmount -e localhost") +        if ret != 0: +            return False +        ret, out, err = mount_volume(self.volname, self.mount_proto, +                                     self.mountpoint, self.vips[0], +                                     
self.clients[0], self.options) +        if ret != 0: +            tc.logger.error("Mounting Volume %s failed on %s:%s" % +                            (self.volname, self.clients[0], self.mountpoint)) +            return False + +        return True + +    def teardown(self, teardown_ganesha_setup=False): +        """ +            The function to cleanup the test setup +            Kwargs: +                teardown_ganesha_setup (bool): If True teardowns ganesha setup, +                    else leaves the ganesha setup as it is. +                    Default value is False +        """ +        umount_volume(tc.clients[0], self.mountpoint) +        ret, out, err = tc.run(tc.clients[0], "rm -rf %s/*" % self.mountpoint) +        time.sleep(5) +        if ret != 0: +            tc.logger.error("rm -rf command failed on the mountpoint %s" +                            % self.mountpoint) +            return False +        if teardown_ganesha_setup: +            ret = teardown_nfs_ganesha_setup() +            return ret +        return True + +    def cleanup(self, delete_vol=False): +        """ +            The function to cleanup the volume +            Kwargs: +                delete_vol (bool): If True deletes the volume. +                    else leaves the volume as it is. +                    Default value is False +        """ +        if not delete_vol: +            return True +        return GlusterBaseClass.cleanup(self) diff --git a/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/ganesha.py b/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/ganesha.py new file mode 100644 index 00000000000..8785bb3880e --- /dev/null +++ b/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/ganesha.py @@ -0,0 +1,592 @@ +#!/usr/bin/env python +#  This file is part of DiSTAF +#  Copyright (C) 2015-2016  Red Hat, Inc. 
<http://www.redhat.com> +# +#  This program is free software; you can redistribute it and/or modify +#  it under the terms of the GNU General Public License as published by +#  the Free Software Foundation; either version 2 of the License, or +#  any later version. +# +#  This program is distributed in the hope that it will be useful, +#  but WITHOUT ANY WARRANTY; without even the implied warranty of +#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +#  GNU General Public License for more details. +# +#  You should have received a copy of the GNU General Public License along +#  with this program; if not, write to the Free Software Foundation, Inc., +#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" +    Description: Library for gluster NFS-Ganesha operations. +""" + +import re +import os +import time +import socket +from collections import OrderedDict +from distaf.util import tc +from distaflibs.gluster.volume_ops import get_volume_info, setup_meta_vol +from distaflibs.gluster.peer_ops import (peer_probe_servers, +                                         nodes_from_pool_list) + + +def vol_set_nfs_disable(volname, option=True, mnode=None): +    '''Enables/Disables nfs for the volume. +    Args: +        volname (str): Volume name. +    Kwargs: +        option (Optional[bool]): If True it disables nfs for +            that volume else enables nfs for that volume. +            Default value is True. +        mnode (Optional[str]): Node on which the command has +            to be executed. Default value is tc.servers[0]. +    Returns: +        bool: True if successful, False otherwise. 
+    ''' +    if mnode is None: +        mnode = tc.servers[0] +    if option: +        volinfo = get_volume_info(volname, mnode) +        nfs_disable = volinfo[volname]['options'].get('nfs.disable') +        if nfs_disable == "on": +            tc.logger.info(" nfs is already disabled for the volume %s" +                           % volname) +            return True +        ret, _, _ = tc.run(mnode, "gluster volume set %s nfs.disable on " +                           "--mode=script" % volname) +        if ret != 0: +            tc.logger.error("failed to set nfs.disable on %s" % volname) +            return False +    else: +        ret, _, _ = tc.run(mnode, "gluster volume set %s nfs.disable off " +                           "--mode=script" % volname) +        if ret != 0: +            return False + +    return True + + +def vol_set_ganesha(volname, option=True, mnode=None): +    '''Enables/Disables ganesha for the volume. +    Args: +        volname (str): Volume name. +    Kwargs: +        option (Optional[bool]): If True it enables ganesha for +            that volume else disables ganesha for that volume. +            Default value is True. +        mnode (Optional[str]): Node on which the command has +            to be executed. Default value is tc.servers[0]. +    Returns: +        bool: True if successful, False otherwise. 
+    ''' +    if mnode is None: +        mnode = tc.servers[0] +    if option: +        ret = vol_set_nfs_disable(volname) +        if not ret: +            return False +        volinfo = get_volume_info(volname, mnode) +        enable = volinfo[volname]['options'].get('ganesha.enable') +        if enable == "on": +            tc.logger.info(" ganesha is already enabled for the volume %s" +                           % volname) +            return True +        ret, _, _ = tc.run(mnode, "gluster volume set %s ganesha.enable on " +                           "--mode=script" % volname) +        if ret != 0: +            tc.logger.error("failed to set ganesha.enable on %s" % volname) +            return False +    else: +        ret, _, _ = tc.run(mnode, "gluster volume set %s ganesha.enable off " +                           "--mode=script" % volname) +        if ret != 0: +            return False + +    return True + + +def validate_ganesha_ha_status(mnode=None): +    '''Validates Ganesha HA Status. +    Kwargs: +        mnode (Optional[str]): Node on which the command has +            to be executed. Default value is tc.servers[0]. +    Returns: +        bool: True if successful(HA status is correct), +            False otherwise. 
+    ''' +    if mnode is None: +        mnode = tc.servers[0] +    ret, out, _ = tc.run(mnode, "/usr/libexec/ganesha/ganesha-ha.sh --status " +                         "| grep -v 'Online' | cut -d ' ' -f 1 | sed s/" +                         "'-cluster_ip-1'//g | sed s/'-trigger_ip-1'//g") +    if ret != 0: +        tc.logger.error("failed to execute the ganesha-ha status command") +        return False +    list1 = filter(None, out.split("\n")) + +    ret, out, _ = tc.run(mnode, "/usr/libexec/ganesha/ganesha-ha.sh --status " +                         "| grep -v 'Online' | cut -d ' ' -f 2") +    if ret != 0: +        tc.logger.error("failed to execute the ganesha-ha status command") +        return False +    list2 = filter(None, out.split("\n")) + +    if list1 == list2: +        tc.logger.info("ganesha ha status is correct") +        return True + +    tc.logger.error("ganesha ha status is incorrect") +    return False + + +def set_nfs_ganesha(option=True, mnode=None): +    '''Enables/Disables NFS-Ganesha Cluster +    Kwargs: +        option (Optional[bool]): If True it enables the nfs-ganesha +            HA Cluster, else disables the nfs-ganesha HA Cluster. +            Default value is True. +        mnode (Optional[str]): Node on which the command has +            to be executed. Default value is tc.servers[0]. +    Returns: +        bool: True if successful, False otherwise. 
+    ''' +    if mnode is None: +        mnode = tc.servers[0] +    servers = nodes_from_pool_list() +    no_of_servers = len(servers) +    if option: +        ret, _, _ = tc.run(mnode, "gluster nfs-ganesha enable --mode=script") +        if ret == 0: +            tc.logger.info("nfs-ganesha enable success") +            time.sleep(45) +            ret, _, _ = tc.run(mnode, "pcs status") +            ret = validate_ganesha_ha_status(mnode) +            if ret: +                return True +            else: +                return False +        else: +            tc.logger.error("nfs-ganesha enable falied") +            return False +    else: +        ret, _, _ = tc.run(tc.servers[0], "gluster nfs-ganesha disable " +                           "--mode=script") +        if ret == 0: +            tc.logger.info("nfs-ganesha disable success") +            time.sleep(10) +            for node in tc.servers[0:no_of_servers]: +                ret, _, _ = tc.run(node, "pcs status") +            return True +        else: +            tc.logger.error("nfs-ganesha disable falied") +            return False + + +def get_host_by_name(servers=None): +    '''Get hostname of the specified servers. +    Kwargs: +        servers (Optional[str]): Get hostnames of the specified servers. +    Returns: +        dict: dict with 'hostname or ip_address" of the server as key and +              'hostname' of the server as value. +    ''' +    if servers is None: +        servers = nodes_from_pool_list() + +    if not isinstance(servers, list): +        servers = [servers] + +    server_hostname_dict = OrderedDict() +    for server in servers: +        server_hostname_dict[server] = socket.gethostbyaddr(server)[0] + +    return server_hostname_dict + + +def create_nfs_passwordless_ssh(snodes=[], guser=None, mnode=None): +    '''Sets up the passwordless ssh between mnode and all other snodes. 
+    Args: +        snodes (list): List of nodes for which we require passwordless +            ssh from mnode. +    Kwargs: +        guser (Optional[str]): Username . Default value is root. +        mnode (Optional[str]): Node from which we require passwordless +            ssh to snodes. Default value is tc.servers[0]. +    Returns: +        bool: True if successfull, False otherwise +    ''' +    if guser is None: +        guser = 'root' +    if mnode is None: +        mnode = tc.servers[0] +    if not isinstance(snodes, list): +        snodes = [snodes] +    loc = "/var/lib/glusterd/nfs/" +    mconn = tc.get_connection(mnode, user='root') +    if not mconn.modules.os.path.isfile('/root/.ssh/id_rsa'): +        if not mconn.modules.os.path.isfile('%s/secret.pem' % loc): +            ret, _, _ = tc.run(mnode, "ssh-keygen -f /var/lib/glusterd/nfs/" +                               "secret.pem -q -N ''") +            if ret != 0: +                tc.logger.error("Unable to generate the secret pem file") +                return False +        mconn.modules.os.chmod("%s/secret.pem" % loc, 0600) +        mconn.modules.shutil.copyfile("%s/secret.pem" % loc, +                                      "/root/.ssh/id_rsa") +        mconn.modules.os.chmod("/root/.ssh/id_rsa", 0600) +        tc.logger.debug("Copying the secret.pem.pub to id_rsa.pub") +        mconn.modules.shutil.copyfile("%s/secret.pem.pub" % loc, +                                      "/root/.ssh/id_rsa.pub") +    else: +        mconn.modules.shutil.copyfile("/root/.ssh/id_rsa", +                                      "%s/secret.pem" % loc) +        mconn.modules.os.chmod("%s/secret.pem" % loc, 0600) +        tc.logger.debug("Copying the id_rsa.pub to secret.pem.pub") +        mconn.modules.shutil.copyfile("/root/.ssh/id_rsa.pub", +                                      "%s/secret.pem.pub" % loc) +    if not isinstance(snodes, list): +        snodes = [snodes] +    for snode in snodes: +        sconn = 
tc.get_connection(snode, user=guser) +        try: +            slocal = sconn.modules.os.path.expanduser('~') +            sfh = sconn.builtin.open("%s/.ssh/authorized_keys" % slocal, "a") +            with mconn.builtin.open("/root/.ssh/id_rsa.pub", 'r') as f: +                for line in f: +                    sfh.write(line) +        except: +            tc.logger.error("Unable to establish passwordless ssh %s@%s to " +                            "%s@%s" % ('root', mnode, guser, snode)) +            return False +        finally: +            sfh.close() +            sconn.close() +    mconn.close() +    time.sleep(30) +    for snode in snodes: +        ret, _, _ = tc.run(mnode, "ssh-keyscan -H %s  >> ~/.ssh/known_hosts" +                           % snode) +        if snode != mnode: +            ret, _, _ = tc.run(mnode, "scp /var/lib/glusterd/nfs/secret.*  " +                               "%s:/var/lib/glusterd/nfs/" % snode) +            if ret != 0: +                return False + +    return True + + +def validate_ganesha_ha_failover(mnode=None, snodes=None): +    '''Validates HA failover status +    Kwargs: +         mnode (Optional[str]): Node on which the ha status command has +            to be executed. Default value is tc.servers[0]. +         snodes (Optional[str]): Node/Nodes on which ganesha process is +            Killed/stopped or Node shutdown +    Returns: +         bool: True if successfull, False otherwise. 
+    ''' +    if mnode is None: +        mnode = tc.servers[0] +    if snodes is None: +        snodes = tc.servers[1] +    if not isinstance(snodes, list): +        snodes = [snodes] +    ha_flag = True +    ret, out, _ = tc.run(mnode, "/usr/libexec/ganesha/ganesha-ha.sh --status " +                         "| grep -v 'Online' | grep -v 'dead' | cut -d ' ' " +                         "-f 1 | sed s/'-cluster_ip-1'//g | sed s/" +                         "'-trigger_ip-1'//g") +    if ret == 0: +        list1 = filter(None, out.split("\n")) +    ret, out, _ = tc.run(mnode, "/usr/libexec/ganesha/ganesha-ha.sh --status " +                         "| grep -v 'Online' | grep -v 'dead' | cut -d ' ' " +                         "-f 2 | sed s/'-cluster_ip-1'//g | sed s/" +                         "'-trigger_ip-1'//g") +    if ret == 0: +        list2 = filter(None, out.split("\n")) +    server_hostname_dict = get_host_by_name() +    snodes_hostnames = [] +    for snode in snodes: +        snodes_hostnames.append(server_hostname_dict[snode]) +    for val1, val2 in zip(list1, list2): +        if val1 in snodes_hostnames: +            if val1 == val2: +                tc.logger.error("Failover dint happen, wrong failover status " +                                "-> %s %s" % (val1, val2)) +                ha_flag = False +            else: +                tc.logger.info("%s successfully failed over on %s" +                               % (val1, val2)) +        else: +            if val1 != val2: +                tc.logger.error("Failover not required, wrong failover status " +                                "-> %s %s" % (val1, val2)) +                ha_flag = False + +    return ha_flag + + +def get_ganesha_ha_failover_nodes(mnode=None, snodes=None): +    '''Returns HA status and dictionary of +    Kwargs: +         mnode (Optional[str]): Node on which the ha status command has +            to be executed. Default value is tc.servers[0]. 
+         snodes (Optional[str]): Node/Nodes on which ganesha process +            is Killed/stopped or Node shutdown +    Returns: +         bool,dict: If successfull True,dict +            False otherwise +    ''' +    if mnode is None: +        mnode = tc.servers[0] +    if snodes is None: +        snodes = tc.servers[1] +    if not isinstance(snodes, list): +        snodes = [snodes] +    ha_flag = True +    tnode = OrderedDict() +    ret, out, _ = tc.run(mnode, "/usr/libexec/ganesha/ganesha-ha.sh --status " +                         "| grep -v 'Online' | grep -v 'dead' | cut -d ' ' " +                         "-f 1 | sed s/'-cluster_ip-1'//g | sed s/" +                         "'-trigger_ip-1'//g") +    if ret == 0: +        list1 = filter(None, out.split("\n")) +    ret, out, _ = tc.run(mnode, "/usr/libexec/ganesha/ganesha-ha.sh --status " +                         "| grep -v 'Online' | grep -v 'dead' | cut -d ' ' " +                         "-f 2 | sed s/'-cluster_ip-1'//g | sed s/" +                         "'-trigger_ip-1'//g") +    if ret == 0: +        list2 = filter(None, out.split("\n")) +    server_hostname_dict = get_host_by_name() +    snodes_hostnames = [] +    for snode in snodes: +        snodes_hostnames.append(server_hostname_dict[snode]) +    for val1, val2 in zip(list1, list2): +        if val1 in snodes_hostnames: +            if val1 == val2: +                tc.logger.error("Failover dint happen, wrong failover status " +                                "-> %s %s" % (val1, val2)) +                ha_flag = False +            else: +                tnode[server_hostname_dict[val1]] = server_hostname_dict[val2] +                tc.logger.info("%s successfully failed over on %s" +                               % (val1, val2)) +        else: +            if val1 != val2: +                tc.logger.error("Failover not required, wrong failover status " +                                "-> %s %s" % (val1, val2)) +                ha_flag = False + 
+    return (ha_flag, tnode) + + +def update_ganesha_ha_conf(no_of_servers=None): +    '''Updates the ganesha-ha.conf file, with VIPs and hostnames. +    Kwargs: +        no_of_servers (Optional[int]): The number of nodes on which we have +            to modify the ganesha-ha.conf file. Default it takes +            the number of servers from the pool list. +    Returns: +        bool: True if successfull, False otherwise. +    ''' +    if no_of_servers is None: +        servers = nodes_from_pool_list() +        no_of_servers = len(servers) +    else: +        servers = tc.servers[0:no_of_servers] +    server_hostname_dict = get_host_by_name(servers) +    hostnames = server_hostname_dict.values() +    hosts = ','.join(hostnames) +    file_src_path = "/etc/ganesha/ganesha-ha.conf.sample" +    file_dest_path = "/etc/ganesha/ganesha-ha.conf" +    ha_server = tc.run(tc.servers[0], "hostname") +    conn = tc.get_connection(tc.servers[0], "root") +    if conn.modules.os.path.isfile(file_src_path) == True: +        tc.logger.info("%s file available and should be updated as " +                       "ganesha-ha.conf" % file_src_path) +        try: +            conn.modules.shutil.copy(file_src_path, file_dest_path) +            FH = conn.builtin.open(file_dest_path, "r+") +        except IOError as e: +            tc.logger.error(e) +            return False +    lines = FH.readlines() +    FH.seek(0) +    FH.truncate() +    for i in range(len(lines)): +        if re.search("HA_NAME", lines[i]) != None: +            lines[i] = re.sub(r'^HA_NAME.*', "HA_NAME=\"G"+str(time.time()) + +                              "\"", lines[i]) +        if re.search("HA_VOL_SERVER", lines[i]) != None: +            lines[i] = re.sub(r'^HA_VOL_SERVER.*', "HA_VOL_SERVER=\"" + +                              ha_server[1].strip()+"\"", lines[i]) +        if re.search("HA_CLUSTER_NODES", lines[i]) != None: +            lines[i] = re.sub(r'^HA_CLUSTER_NODES.*', "HA_CLUSTER_NODES=\"" + +             
                 hosts+"\"", lines[i]) +        if re.search("VIP_", lines[i]) != None: +            lines[i] = re.sub(r'.*VIP_.*\n', "", lines[i]) +    vips = (tc.global_config["gluster"]["cluster_config"] +            ["nfs_ganesha"]["vips"]) +    for i in range(no_of_servers): +        lines += "VIP_%s=\"%s\"\n" % (hostnames[i], vips[i]) +    FH.write(''.join(lines)) +    # create a local copy of this ha.conf file +    f = open("/tmp/ganesha-ha.conf", "w") +    f.write(''.join(lines)) +    f.close() +    FH.close() +    conn.close() +    # copy this ha.conf file to all the other nodes +    for node in tc.servers[1:no_of_servers]: +        ret = tc.upload(node, "/tmp/ganesha-ha.conf", file_dest_path) + +    return True + + +def cluster_auth_setup(no_of_servers=None): +    '''Sets the hacluster password, starts pcsd service and runs +       pcs cluster auth command. +    Kwargs: +        no_of_servers (Optional[int]): The number of nodes on which we have +            to setup the HA cluster. Default it takes the number +            of servers from the pool list. +    Returns: +        bool: True if successfull, False otherwise. 
+    ''' +    if no_of_servers is None: +        servers = nodes_from_pool_list() +        no_of_servers = len(servers) +    else: +        servers = tc.servers[0:no_of_servers] +    result = True +    for node in tc.servers[0:no_of_servers]: +        ret, _, _ = tc.run(node, "echo hacluster | passwd --stdin hacluster") +        if ret != 0: +            tc.logger.error("unable to set password for hacluster on %s" +                            % node) +            return False +        else: +            ret, _, _ = tc.run(node, "service pcsd start") +            if ret != 0: +                tc.logger.error("service pcsd start command failed on %s" +                                % node) +                return False +    server_hostname_dict = get_host_by_name(servers) +    for node in tc.servers[0:no_of_servers]: +        val = "" +        for key in server_hostname_dict: +            val += server_hostname_dict[key] +            val += " " +        ret, _, _ = tc.run(node, "pcs cluster auth %s -u hacluster -p " +                           "hacluster" % val) +        if ret != 0: +                tc.logger.error("pcs cluster auth command failed on %s" % node) +                result = False + +    return result + + +def setup_nfs_ganesha(no_of_servers=None): +    '''Setup NFS-Ganesha HA cluster. +    Kwargs: +        no_of_servers (Optional[int]): The number of nodes on which we have +            to setup the HA cluster. Default it takes the number +            of servers from the pool list. +    Returns: +        bool: True if successfull, False otherwise. 
+    ''' +    if ('setup_nfs_ganesha' in tc.global_flag and +            tc.global_flag['setup_nfs_ganesha'] == True): +        tc.logger.debug("The setup nfs-ganesha is already setup, returning...") +        return True +    if no_of_servers is None: +        servers = tc.servers +        no_of_servers = len(servers) +    servers = tc.servers[0:no_of_servers] +    no_of_servers = int(no_of_servers) +    # Step 1: Peer probe +    ret = peer_probe_servers(tc.servers[1:no_of_servers], mnode=tc.servers[0]) +    if not ret: +        return False +    # Step 2: Passwordless ssh for nfs +    ret = create_nfs_passwordless_ssh(snodes=tc.servers[0:no_of_servers], +                                      mnode=tc.servers[0]) +    if ret: +        tc.logger.info("passwordless ssh between nodes successfull") +    else: +        tc.logger.error("passwordless ssh between nodes unsuccessfull") +        return False +    # Step 3: Update ganesha-ha.conf file +    ret = update_ganesha_ha_conf(no_of_servers) +    if ret: +        tc.logger.info("ganesha-ha.conf files succeessfully updated on all " +                       "the nodes") +    else: +        tc.logger.error("ganesha-ha.conf files not succeessfully updated on " +                        "all the nodes") +        return False +    # Step 4: Cluster setup +    ret = cluster_auth_setup(no_of_servers) +    if ret: +        tc.logger.info("successfull cluster setup") +    else: +        tc.logger.error("unsuccessfull cluster setup") +        return False +    # Step 5: Using CLI to create shared volume +    ret, _, _ = tc.run(tc.servers[0], "gluster v list | grep " +                       "'gluster_shared_storage'") +    if ret != 0: +        ret, _, _ = tc.run(tc.servers[0], "gluster volume set all " +                           "cluster.enable-shared-storage enable") +        if ret != 0: +            tc.logger.error("shared volume creation unsuccessfull") +            return False +        else: +            
tc.logger.info("shared volume creation successfull") +            time.sleep(10) +    else: +        tc.logger.info("shared volume already exists") +    time.sleep(60) +    # Step 6: Enable NFS-Ganesha +    ret = set_nfs_ganesha(True) +    if ret: +        tc.logger.info("gluster nfs-ganesha enable success") +    else: +        tc.logger.error("gluster nfs-ganesha enable failed") +        return False +    # Setting globalflag to True +    tc.global_flag["setup_nfs_ganesha"] = True + +    return True + + +def teardown_nfs_ganesha_setup(mnode=None): +    '''Teardowns the NFS-Ganesha HA setup. +    Kwargs: +        mnode (Optional[str]): Node on which the command has +            to be executed. Default value is tc.servers[0]. +    Returns: +        bool: True if successful, False otherwise. +    ''' +    if mnode is None: +        mnode = tc.servers[0] +    # Step 1: Disable NFS-Ganesha +    ret = set_nfs_ganesha(False) +    if ret: +        tc.logger.info("gluster nfs-ganesha disable success") +    else: +        tc.logger.error("gluster nfs-ganesha disable failed") +        return False +    # Step 2: Using CLI to delete the shared volume +    ret, _, _ = tc.run(mnode, "gluster volume set all " +                       "cluster.enable-shared-storage disable --mode=script") +    if ret != 0: +        tc.logger.error("shared volume deletion unsuccessfull") +        return False +    else: +        tc.logger.info("shared volume deletion successfull") +    # Setting globalflag to False +    tc.global_flag["setup_nfs_ganesha"] = False + +    return True diff --git a/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/mount_ops.py b/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/mount_ops.py index 00bb49bf82d..2d435f40d53 100644 --- a/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/mount_ops.py +++ b/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/mount_ops.py @@ -36,7 +36,7 @@ def mount_volume(volname, 
mtype='glusterfs', mpoint='/mnt/glusterfs', \      if options != '':          options = "-o %s" % options      if mtype == 'nfs' and options != '': -        options = "%s,vers=3" % options +        options = "%s" % options      elif mtype == 'nfs' and options == '':          options = '-o vers=3'      ret, _, _ = tc.run(mclient, "mount | grep %s | grep %s | grep \"%s\"" \ @@ -45,7 +45,7 @@ def mount_volume(volname, mtype='glusterfs', mpoint='/mnt/glusterfs', \          tc.logger.debug("Volume %s is already mounted at %s" \          % (volname, mpoint))          return (0, '', '') -    mcmd = "mount -t %s %s %s:%s %s" % \ +    mcmd = "mount -t %s %s %s:/%s %s" % \              (mtype, options, mserver, volname, mpoint)      tc.run(mclient, "test -d %s || mkdir -p %s" % (mpoint, mpoint), \              verbose=False) diff --git a/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/volume_ops.py b/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/volume_ops.py index a77c0ae2729..5a4a3444b93 100644 --- a/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/volume_ops.py +++ b/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/volume_ops.py @@ -290,6 +290,12 @@ def setup_vol(volname='', dist='', rep='', dispd='', red='', stripe='', \      if not ret:          tc.logger.error("volume start %s failed" % volname)          return False +    if tc.global_config["gluster"]["cluster_config"]["nfs_ganesha"]["enable"]: +        from distaflibs.gluster.ganesha import vol_set_ganesha +        ret = vol_set_ganesha(volname) +        if not ret: +            tc.logger.error("failed to set the ganesha option for %s" % volname) +            return False      tc.global_flag[volname] = True      return True diff --git a/tests/distaf/tests_d/nfs_ganesha/__init__.py b/tests/distaf/tests_d/nfs_ganesha/__init__.py new file mode 100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/tests/distaf/tests_d/nfs_ganesha/__init__.py diff --git 
a/tests/distaf/tests_d/nfs_ganesha/test_nfs_ganesha_setup.py b/tests/distaf/tests_d/nfs_ganesha/test_nfs_ganesha_setup.py new file mode 100644 index 00000000000..b95d3c9af90 --- /dev/null +++ b/tests/distaf/tests_d/nfs_ganesha/test_nfs_ganesha_setup.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +#  This file is part of DiSTAF +#  Copyright (C) 2015-2016  Red Hat, Inc. <http://www.redhat.com> +# +#  This program is free software; you can redistribute it and/or modify +#  it under the terms of the GNU General Public License as published by +#  the Free Software Foundation; either version 2 of the License, or +#  any later version. +# +#  This program is distributed in the hope that it will be useful, +#  but WITHOUT ANY WARRANTY; without even the implied warranty of +#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +#  GNU General Public License for more details. +# +#  You should have received a copy of the GNU General Public License along +#  with this program; if not, write to the Free Software Foundation, Inc., +#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +from distaf.util import tc, testcase +from distaflibs.gluster.class_setup_nfs_ganesha_vol import SetupNfsGaneshaVol + + +class TestSetupNfsGaneshaVol(SetupNfsGaneshaVol): +    """ +        Test case to setup NFS-Ganesha +    """ +    def __init__(self, config_data, nfs_options="vers=3"): +        """ +            Initialise the class with the config values +            Kwargs: +                nfs_options (str): This argument takes the nfs options, +                    say vers=3 or vers=4. 
+                    Default value is vers=3 +        """ +        tc.logger.info("Testcase to setup NFS-Ganesha volume %s" +                       % nfs_options) +        SetupNfsGaneshaVol.__init__(self, config_data, nfs_options) + +    def run(self): +        return True + + +@testcase("test_setup_nfs_ganesha_vol_v3") +class TestSetupNfsGaneshaVolV3(TestSetupNfsGaneshaVol): +    """ +        Test case to setup NFS-Ganesha and +        export volume with vers=3 +    """ +    def __init__(self, config_data): +        TestSetupNfsGaneshaVol.__init__(self, config_data, +                                        nfs_options="vers=3") + + +@testcase("test_setup_nfs_ganesha_vol_v4") +class TestSetupNfsGaneshaVolV4(TestSetupNfsGaneshaVol): +    """ +        Test case to setup NFS-Ganesha and +        export volume with vers=4 +    """ +    def __init__(self, config_data): +        TestSetupNfsGaneshaVol.__init__(self, config_data, +                                        nfs_options="vers=4")  | 
