summaryrefslogtreecommitdiffstats
path: root/glustolibs-gluster/glustolibs/gluster/lib_utils.py
diff options
context:
space:
mode:
Diffstat (limited to 'glustolibs-gluster/glustolibs/gluster/lib_utils.py')
-rwxr-xr-xglustolibs-gluster/glustolibs/gluster/lib_utils.py466
1 files changed, 373 insertions, 93 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/lib_utils.py b/glustolibs-gluster/glustolibs/gluster/lib_utils.py
index 29172b2ea..b04976b1c 100755
--- a/glustolibs-gluster/glustolibs/gluster/lib_utils.py
+++ b/glustolibs-gluster/glustolibs/gluster/lib_utils.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2021 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -26,8 +26,6 @@ import re
import time
from collections import OrderedDict
import tempfile
-import subprocess
-import random
ONE_GB_BYTES = 1073741824.0
@@ -53,23 +51,16 @@ def append_string_to_file(mnode, filename, str_to_add_in_file,
Returns:
True, on success, False otherwise
"""
- try:
- conn = g.rpyc_get_connection(mnode, user=user)
- if conn is None:
- g.log.error("Unable to get connection to 'root' of node %s"
- " in append_string_to_file()" % mnode)
- return False
-
- with conn.builtin.open(filename, 'a') as _filehandle:
- _filehandle.write(str_to_add_in_file)
-
- return True
- except IOError:
- g.log.error("Exception occurred while adding string to "
- "file %s in append_string_to_file()", filename)
+ cmd = "echo '{0}' >> {1}".format(str_to_add_in_file,
+ filename)
+ ret, out, err = g.run(mnode, cmd, user)
+ if ret or out or err:
+ g.log.error("Unable to append string '{0}' to file "
+ "'{1}' on node {2} using user {3}"
+ .format(str_to_add_in_file, filename,
+ mnode, user))
return False
- finally:
- g.rpyc_close_connection(host=mnode, user=user)
+ return True
def search_pattern_in_file(mnode, search_pattern, filename, start_str_to_parse,
@@ -268,31 +259,19 @@ def list_files(mnode, dir_path, parse_str="", user="root"):
NoneType: None if command execution fails, parse errors.
list: files with absolute name
"""
-
- try:
- conn = g.rpyc_get_connection(mnode, user=user)
- if conn is None:
- g.log.error("Unable to get connection to 'root' of node %s"
- % mnode)
- return None
-
- filepaths = []
- for root, directories, files in conn.modules.os.walk(dir_path):
- for filename in files:
- if parse_str != "":
- if parse_str in filename:
- filepath = conn.modules.os.path.join(root, filename)
- filepaths.append(filepath)
- else:
- filepath = conn.modules.os.path.join(root, filename)
- filepaths.append(filepath)
- return filepaths
- except StopIteration:
- g.log.error("Exception occurred in list_files()")
+ if parse_str == "":
+ cmd = "find {0} -type f".format(dir_path)
+ else:
+ cmd = "find {0} -type f | grep {1}".format(dir_path,
+ parse_str)
+ ret, out, err = g.run(mnode, cmd, user)
+ if ret or err:
+ g.log.error("Unable to get the list of files on path "
+ "{0} on node {1} using user {2} due to error {3}"
+ .format(dir_path, mnode, user, err))
return None
-
- finally:
- g.rpyc_close_connection(host=mnode, user=user)
+ file_list = out.split('\n')
+ return file_list[0:len(file_list)-1]
def get_servers_bricks_dict(servers, servers_info):
@@ -308,7 +287,7 @@ def get_servers_bricks_dict(servers, servers_info):
get_servers_bricks_dict(g.config['servers'], g.config['servers_info'])
"""
servers_bricks_dict = OrderedDict()
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
for server in servers:
server_info = servers_info[server]
@@ -342,7 +321,7 @@ def get_servers_used_bricks_dict(mnode, servers):
get_servers_used_bricks_dict(g.config['servers'][0]['host'],
g.config['servers'])
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
servers_used_bricks_dict = OrderedDict()
@@ -389,7 +368,7 @@ def get_servers_unused_bricks_dict(mnode, servers, servers_info):
g.config['servers'],
g.config['servers_info'])
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
dict1 = get_servers_bricks_dict(servers, servers_info)
dict2 = get_servers_used_bricks_dict(mnode, servers)
@@ -408,7 +387,8 @@ def get_servers_unused_bricks_dict(mnode, servers, servers_info):
return servers_unused_bricks_dict
-def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info):
+def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info,
+ dirname=None):
"""Forms bricks list for create-volume/add-brick given the num_of_bricks
servers and servers_info.
@@ -421,6 +401,9 @@ def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info):
needs to be selected for creating the brick list.
servers_info (dict): dict of server info of each servers.
+ kwargs:
+ dirname (str): Name of the directory for glusterfs brick
+
Returns:
list - List of bricks to use with volume-create/add-brick
None - if number_of_bricks is greater than unused bricks.
@@ -429,7 +412,7 @@ def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info):
form_bricks_path(g.config['servers'](0), "testvol", 6,
g.config['servers'], g.config['servers_info'])
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
dict_index = 0
bricks_list = []
@@ -458,10 +441,18 @@ def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info):
list(servers_unused_bricks_dict.values())[dict_index])
brick_path = ''
if current_server_unused_bricks_list:
- brick_path = ("%s:%s/%s_brick%s" %
- (current_server,
- current_server_unused_bricks_list[0], volname, num))
- bricks_list.append(brick_path)
+ if dirname and (" " not in dirname):
+ brick_path = ("%s:%s/%s_brick%s" %
+ (current_server,
+ current_server_unused_bricks_list[0], dirname,
+ num))
+ bricks_list.append(brick_path)
+ else:
+ brick_path = ("%s:%s/%s_brick%s" %
+ (current_server,
+ current_server_unused_bricks_list[0], volname,
+ num))
+ bricks_list.append(brick_path)
# Remove the added brick from the current_server_unused_bricks_list
list(servers_unused_bricks_dict.values())[dict_index].pop(0)
@@ -483,7 +474,7 @@ def is_rhel6(servers):
Returns:
bool:Returns True, if its RHEL-6 else returns false
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
results = g.run_parallel(servers, "cat /etc/redhat-release")
@@ -509,7 +500,7 @@ def is_rhel7(servers):
Returns:
bool:Returns True, if its RHEL-7 else returns false
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
results = g.run_parallel(servers, "cat /etc/redhat-release")
@@ -544,22 +535,13 @@ def get_disk_usage(mnode, path, user="root"):
Example:
get_disk_usage("abc.com", "/mnt/glusterfs")
"""
-
- inst = random.randint(10, 100)
- conn = g.rpyc_get_connection(mnode, user=user, instance=inst)
- if conn is None:
- g.log.error("Failed to get rpyc connection")
- return None
- cmd = 'stat -f ' + path
- p = conn.modules.subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out, err = p.communicate()
- ret = p.returncode
- if ret != 0:
- g.log.error("Failed to execute stat command")
+ cmd = 'stat -f {0}'.format(path)
+ ret, out, err = g.run(mnode, cmd, user)
+ if ret:
+ g.log.error("Unable to get stat of path {0} on node {1} "
+ "using user {2} due to error {3}".format(path, mnode,
+ user, err))
return None
-
- g.rpyc_close_connection(host=mnode, user=user, instance=inst)
res = ''.join(out)
match = re.match(r'.*Block size:\s(\d+).*Blocks:\sTotal:\s(\d+)\s+?'
r'Free:\s(\d+)\s+?Available:\s(\d+).*Inodes:\s'
@@ -680,7 +662,7 @@ def install_epel(servers):
Example:
install_epel(["abc.com", "def.com"])
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
rt = True
@@ -734,7 +716,7 @@ def inject_msg_in_logs(nodes, log_msg, list_of_dirs=None, list_of_files=None):
Returns:
bool: True if successfully injected msg on all log files.
"""
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
if list_of_dirs is None:
@@ -779,10 +761,11 @@ def inject_msg_in_logs(nodes, log_msg, list_of_dirs=None, list_of_files=None):
def is_core_file_created(nodes, testrun_timestamp,
- paths=['/', '/var/log/core', '/tmp']):
+ paths=['/', '/var/log/core',
+ '/tmp', '/var/crash', '~/']):
'''
- Listing directories and files in "/", /var/log/core, /tmp
- directory for checking if the core file created or not
+ Listing directories and files in "/", /var/log/core, /tmp,
+ "/var/crash", "~/" directory for checking if the core file created or not
Args:
@@ -795,7 +778,7 @@ def is_core_file_created(nodes, testrun_timestamp,
of test case 'date +%s'
paths(list):
By default core file will be verified in "/","/tmp",
- "/var/log/core"
+ "/var/log/core", "/var/crash", "~/"
If test case need to verify core file in specific path,
need to pass path from test method
'''
@@ -805,8 +788,16 @@ def is_core_file_created(nodes, testrun_timestamp,
cmd = ' '.join(['cd', path, '&&', 'ls', 'core*'])
cmd_list.append(cmd)
- # Checks for core file in "/", "/var/log/core", "/tmp" directory
+ # Checks for core file in "/", "/var/log/core", "/tmp" "/var/crash",
+ # "~/" directory
for node in nodes:
+ ret, logfiles, err = g.run(node, 'grep -r "time of crash" '
+ '/var/log/glusterfs/')
+ if ret == 0:
+ g.log.error(" Seems like there was a crash, kindly check "
+ "the logfiles, even if you don't see a core file")
+ for logfile in logfiles.strip('\n').split('\n'):
+ g.log.error("Core was found in %s " % logfile.split(':')[0])
for cmd in cmd_list:
ret, out, _ = g.run(node, cmd)
g.log.info("storing all files and directory names into list")
@@ -824,7 +815,8 @@ def is_core_file_created(nodes, testrun_timestamp,
file_timestamp = file_timestamp.strip()
if(file_timestamp > testrun_timestamp):
count += 1
- g.log.error("New core file created %s " % file1)
+ g.log.error("New core file was created and found "
+ "at %s " % file1)
else:
g.log.info("Old core file Found")
# return the status of core file
@@ -848,10 +840,10 @@ def remove_service_from_firewall(nodes, firewall_service, permanent=False):
bool: True|False(Firewall removed or Failed)
"""
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
- if isinstance(firewall_service, str):
+ if not isinstance(firewall_service, list):
firewall_service = [firewall_service]
_rc = True
@@ -892,10 +884,10 @@ def add_services_to_firewall(nodes, firewall_service, permanent=False):
bool: True|False(Firewall Enabled or Failed)
"""
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
- if isinstance(firewall_service, str):
+ if not isinstance(firewall_service, list):
firewall_service = [firewall_service]
_rc = True
@@ -944,30 +936,47 @@ def get_size_of_mountpoint(node, mount_point):
return out
-def add_user(host, uname):
+def add_user(servers, username, group=None):
"""
- Add user with default home directory
+ Add user with default home directory
+
Args:
- host (str): hostname/ip of the system
- uname (str): username
- Returns always True
- """
+ servers(list|str): hostname/ip of the system
+ username(str): username of the user to be created.
+ Kwargs:
+ group(str): Group name to which user is to be
+ added.(Default:None)
- command = "useradd -m %s -d /home/%s" % (uname, uname)
- ret, _, err = g.run(host, command)
- if 'already exists' in err:
- g.log.warn("User %s is already exists", uname)
+ Returns:
+ bool : True if user add is successful on all servers.
+ False otherwise.
+ """
+ # Checking if group is given or not.
+ if not group:
+ cmd = "useradd -m %s -d /home/%s" % (username, username)
else:
- g.log.info("User %s is created successfully", uname)
+ cmd = "useradd -G %s %s" % (group, username)
+
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ results = g.run_parallel(servers, cmd)
+ for server, ret_value in list(results.items()):
+ retcode, _, err = ret_value
+ if retcode != 0 and "already exists" not in err:
+ g.log.error("Unable to add user on %s", server)
+ return False
return True
def del_user(host, uname):
"""
- Delete user with home directory
+ Delete user with home directory
+
Args:
host (str): hostname/ip of the system
uname (str): username
+
Return always True
"""
command = "userdel -r %s" % (uname)
@@ -977,3 +986,274 @@ def del_user(host, uname):
else:
g.log.info("User %s successfully deleted", uname)
return True
+
+
def group_add(servers, groupname):
    """Create a group on all the given servers.

    Args:
        servers(list|str): Nodes on which the group has to be created.
        groupname(str): Name of the group to be created.

    Returns:
        bool: True if the group was created (or already present) on
              every server. False otherwise.

    """
    if not isinstance(servers, list):
        servers = [servers]

    results = g.run_parallel(servers, "groupadd %s" % groupname)

    for server, (retcode, _, err) in list(results.items()):
        # A pre-existing group is not treated as a failure.
        if retcode != 0 and "already exists" not in err:
            g.log.error("Unable to add group %s on server %s",
                        groupname, server)
            return False
    return True
+
+
def group_del(servers, groupname):
    """Delete a group on all the given servers.

    Args:
        servers(list|str): Nodes on which cmd is to be executed.
        groupname(str): Name of the group to be removed.

    Returns:
        bool: Always True (a group that is already absent is treated
              as success; other failures are logged but not fatal).
    """
    if not isinstance(servers, list):
        servers = [servers]

    cmd = "groupdel %s" % groupname
    results = g.run_parallel(servers, cmd)

    for server, ret_value in list(results.items()):
        retcode, _, err = ret_value
        if retcode != 0:
            if "does not exist" in err:
                # Benign: the group was already removed on this server.
                g.log.error("Group %s on server %s already removed",
                            groupname, server)
            else:
                # Genuine failure (e.g. group is a user's primary group);
                # previously this case went completely unreported.
                g.log.error("Unable to delete group %s on server %s: %s",
                            groupname, server, err)
    return True
+
+
def ssh_keygen(mnode):
    """Generate an rsa ssh key pair on the node, if one is missing.

    Args:
        mnode (str): Node on which the key pair is generated.

    Returns:
        bool: True if ssh-keygen succeeds, or when a key pair is
              already present. False otherwise.

    """
    cmd = 'echo -e "n" | ssh-keygen -f ~/.ssh/id_rsa -q -N ""'
    ret, out, _ = g.run(mnode, cmd)
    # ssh-keygen exits non-zero when the key file exists; that case is
    # reported in its output and treated as success.
    return (not ret) or ("already exists" in out)
+
+
def ssh_copy_id(mnode, tonode, passwd, username="root"):
    """Copy mnode's default ssh public key onto tonode's
    authorized_keys file.

    Args:
        mnode (str): Node the key is copied from.
        tonode (str): Node the key is copied to.
        passwd (str): Password of `username` on tonode.
    Kwargs:
        username (str): User on tonode whose authorized_keys is
            updated (Default: root).

    Returns:
        bool: True if ssh-copy-id succeeds (also when the key was
              already present). False otherwise.

    """
    # NOTE(review): the password is interpolated into a shell command
    # and may end up visible in process listings / logs.
    cmd = ('sshpass -p "%s" ssh-copy-id -o StrictHostKeyChecking=no %s@%s' %
           (passwd, username, tonode))
    ret, _, _ = g.run(mnode, cmd)
    return ret == 0
+
+
def set_passwd(servers, username, passwd):
    """Set the password for a given username on all the given servers.

    Args:
        servers(list|str): list of nodes on which cmd is to be executed.
        username(str): username of user for which password is to be set.
        passwd(str): Password to be set.

    Returns:
        bool: True if setting the password succeeded on every server.
              False otherwise.

    """
    if not isinstance(servers, list):
        servers = [servers]

    results = g.run_parallel(servers,
                             "echo %s:%s | chpasswd" % (username, passwd))

    for server, (retcode, _, _) in list(results.items()):
        if retcode:
            g.log.error("Unable to set passwd for user %s on %s",
                        username, server)
            return False
    return True
+
+
def is_user_exists(servers, username):
    """Check if a user is present on the given servers.

    Args:
        servers(str|list): list of nodes on which you need to
            check if the user is present or not.
        username(str): username of user whose presence has to be checked.

    Returns:
        bool: True if user is present on all nodes else False.
    """
    if not isinstance(servers, list):
        servers = [servers]

    cmd = "id %s" % username
    results = g.run_parallel(servers, cmd)

    for server, (ret_value, _, _) in results.items():
        # `id` exits non-zero when the user does not exist. The previous
        # check was inverted (`if not ret_value`), so it reported missing
        # users precisely on the nodes where the user DID exist.
        if ret_value:
            # Pass the log args individually; the old call passed one
            # tuple for two %s placeholders, breaking the message.
            g.log.error("User %s doesn't exists on server %s.",
                        username, server)
            return False
    return True
+
+
def is_group_exists(servers, group):
    """Check if a group is present on the given servers.

    Args:
        servers(str|list): list of nodes on which you need to
            check if group is present or not.
        group(str): groupname of group whose presence has
            to be checked.

    Returns:
        bool: True if group is present on all nodes else False.
    """
    if not isinstance(servers, list):
        servers = [servers]

    cmd = "grep -q %s /etc/group" % group
    results = g.run_parallel(servers, cmd)

    for server, (ret_value, _, _) in results.items():
        # grep -q exits 0 on a match and non-zero when the group is
        # absent. The previous check (`if not ret_value`) was inverted,
        # failing exactly when the group existed.
        if ret_value:
            # Log args passed individually; the old call handed one
            # tuple to two %s placeholders.
            g.log.error("Group %s doesn't exists on server %s.",
                        group, server)
            return False
    return True
+
+
def is_passwordless_ssh_configured(fromnode, tonode, username):
    """Check if passwordless ssh is configured between nodes or not.

    Args:
        fromnode: Server from which passwordless ssh has to be
                  configured.
        tonode: Server to which passwordless ssh has to be
                configured.
        username: username of user to be used for checking
                  passwordless ssh.
    Returns:
        bool: True if configured else false.
    """
    cmd = ("ssh %s@%s hostname" % (username, tonode))
    ret, out, _ = g.run(fromnode, cmd)
    _, hostname, _ = g.run(tonode, "hostname")
    if ret or hostname not in out:
        # Pass the log arguments individually: the previous call handed
        # a single tuple to three %s placeholders, which breaks
        # %-style lazy formatting and garbles the message.
        g.log.error("Passwordless ssh not configured "
                    "from server %s to server %s using user %s.",
                    fromnode, tonode, username)
        return False
    return True
+
+
def collect_bricks_arequal(bricks_list):
    """Collect the arequal-checksum of every brick in the list.

    Args:
        bricks_list (list): List of bricks.
        Example:
            bricks_list = 'gluster.blr.cluster.com:/bricks/brick1/vol'

    Returns:
        tuple(bool, list):
            On success returns (True, list of arequal-checksums of each brick)
            On failure returns (False, list of arequal-checksums of each brick)
            arequal-checksum for a brick would be 'None' when failed to
            collect arequal for that brick.

    Example:
        >>> all_bricks = get_all_bricks(self.mnode, self.volname)
        >>> ret, arequal = collect_bricks_arequal(all_bricks)
        >>> ret
        True
    """
    # Accept a single brick string as well as a list.
    if not isinstance(bricks_list, list):
        bricks_list = [bricks_list]

    success = True
    checksums = []
    for brick in bricks_list:
        # Each brick is "host:/path"; arequal runs on the host,
        # skipping gluster-internal directories.
        host, path = brick.split(':')
        cmd = ('arequal-checksum -p {} -i .glusterfs -i .landfill -i .trashcan'
               .format(path))
        ret, checksum, _ = g.run(host, cmd)

        if ret:
            # Record None for this brick but keep collecting the rest.
            g.log.error('Failed to get arequal on brick %s', brick)
            success = False
            checksums.append(None)
        else:
            g.log.info('Successfully calculated arequal for brick %s', brick)
            checksums.append(checksum)

    return (success, checksums)
+
+
def get_usable_size_per_disk(brickpath, min_free_limit=10):
    """Get the usable size of the disk backing a brick.

    Args:
        brickpath(str): Brick path ("host:/path") to be used to
            calculate usable size.

    Kwargs:
        min_free_limit(int): Min free disk limit to be used, as a
            percentage of the total size.

    Returns:
        (int): Usable size in GB. None in case of errors.
    """
    host, path = brickpath.split(':')
    mount_size = get_size_of_mountpoint(host, path)
    if not mount_size:
        return None

    total = int(mount_size)
    reserved = total * min_free_limit // 100
    # 1048576 = 1024**2: converts the raw mountpoint size (presumably
    # reported in KB by get_size_of_mountpoint -- confirm) into GB.
    return ((total - reserved) // 1048576) + 1