Diffstat (limited to 'glustolibs-io/shared_files/scripts')
 -rwxr-xr-x  glustolibs-io/shared_files/scripts/fd_writes.py             |  68
 -rwxr-xr-x  glustolibs-io/shared_files/scripts/file_dir_ops.py          | 473
 -rw-r--r--  glustolibs-io/shared_files/scripts/file_lock.py             |  51
 -rwxr-xr-x  glustolibs-io/shared_files/scripts/generate_io.py           | 246
 -rw-r--r--  glustolibs-io/shared_files/scripts/memory_and_cpu_logger.py | 108
 5 files changed, 629 insertions(+), 317 deletions(-)
diff --git a/glustolibs-io/shared_files/scripts/fd_writes.py b/glustolibs-io/shared_files/scripts/fd_writes.py
index 87358f45a..e3ebccb63 100755
--- a/glustolibs-io/shared_files/scripts/fd_writes.py
+++ b/glustolibs-io/shared_files/scripts/fd_writes.py
@@ -15,14 +15,15 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+from __future__ import print_function
import argparse
-import random
-import os
-import time
-import string
import datetime
from multiprocessing import Process
+import os
+import random
+import string
import sys
+import time
def is_root(path):
@@ -35,9 +36,9 @@ def is_root(path):
True if path is '/' , False otherwise
"""
if os.path.realpath(os.path.abspath(path)) == '/':
- print ("Directory '%s' is the root of filesystem. "
- "Not performing any operations on the root of filesystem" %
- os.path.abspath(path))
+ print("Directory '%s' is the root of filesystem. "
+ "Not performing any operations on the root of filesystem" % (
+ os.path.abspath(path)))
return True
else:
return False
@@ -72,26 +73,25 @@ def create_dir(dir_path):
try:
os.makedirs(dir_abs_path)
except (OSError, IOError):
- print "Unable to create dir: %s" % dir_abs_path
+ print("Unable to create dir: %s" % dir_abs_path)
return 1
return 0
def fd_write_file(filename, file_size, chunk_sizes_list, write_time,
delay_between_writes=10, log_level='INFO'):
- """Write random data to the file until write_time
- """
+ """Write random data to the file until write_time."""
rc = 0
time_counter = 0
try:
fd = open(filename, "w+b")
- fd.seek(file_size-1)
- fd.write("0")
+ fd.seek(file_size - 1)
+ fd.write(bytes(str("0").encode("utf-8")))
fd.flush()
except IOError as e:
- print ("Unable to open file %s for writing : %s" % (filename,
- e.strerror))
+ print("Unable to open file %s for writing : %s" % (
+ filename, e.strerror))
return 1
while time_counter < write_time:
@@ -102,18 +102,18 @@ def fd_write_file(filename, file_size, chunk_sizes_list, write_time,
range(current_chunk_size)))
offset = random.randint(0, (actual_file_size - current_chunk_size))
if log_level.upper() == 'DEBUG':
- print ("\tFileName: %s, File Size: %s, "
- "Writing to offset: %s, "
- "Data Length: %d, Time Counter: %d" %
- (filename, actual_file_size, offset, len(write_data),
- time_counter))
+ print("\tFileName: %s, File Size: %s, "
+ "Writing to offset: %s, "
+ "Data Length: %d, Time Counter: %d" % (
+ filename, actual_file_size, offset, len(write_data),
+ time_counter))
fd.seek(offset)
- fd.write(write_data)
+ fd.write(bytes(str(write_data).encode("utf-8")))
fd.seek(0)
fd.flush()
except IOError as e:
- print ("Unable to write to file '%s' : %s at time count: %dS" %
- (filename, e.strerror, time_counter))
+ print("Unable to write to file '%s' : %s at time count: %dS" % (
+ filename, e.strerror, time_counter))
rc = 1
time.sleep(delay_between_writes)
@@ -129,11 +129,11 @@ def fd_writes(args):
base_file_name = args.base_file_name
file_sizes_list = args.file_sizes_list
if file_sizes_list:
- file_sizes_list = filter(None, args.file_sizes_list.split(","))
+ file_sizes_list = list(filter(None, args.file_sizes_list.split(",")))
chunk_sizes_list = args.chunk_sizes_list
if chunk_sizes_list:
- chunk_sizes_list = map(int, filter(None,
- args.chunk_sizes_list.split(",")))
+ chunk_sizes_list = list(
+ map(int, filter(None, args.chunk_sizes_list.split(","))))
write_time = int(args.write_time)
delay_between_writes = int(args.delay_between_writes)
log_level = args.log_level
@@ -150,11 +150,11 @@ def fd_writes(args):
file_sizes_dict = {
'k': 1024,
'K': 1024,
- 'm': 1024*1024,
- 'M': 1024*1024,
- 'g': 1024*1024*1024,
- 'G': 1024*1024*1024
- }
+ 'm': 1024 ** 2,
+ 'M': 1024 ** 2,
+ 'g': 1024 ** 3,
+ 'G': 1024 ** 3,
+ }
file_sizes_expanded_list = []
for size in file_sizes_list:
@@ -240,15 +240,15 @@ if __name__ == "__main__":
parser.set_defaults(func=fd_writes)
- print "Starting Script: %s" % ' '.join(sys.argv)
- print "StarTime :'%s' " % (datetime.datetime.now())
+ print("Starting Script: %s" % ' '.join(sys.argv))
+ print("StarTime :'%s' " % datetime.datetime.now())
test_start_time = datetime.datetime.now().replace(microsecond=0)
args = parser.parse_args()
rc = args.func(args)
test_end_time = datetime.datetime.now().replace(microsecond=0)
- print "Execution time: %s" % (test_end_time - test_start_time)
- print "EndTime :'%s' " % (datetime.datetime.now())
+ print("Execution time: %s" % (test_end_time - test_start_time))
+ print("EndTime :'%s' " % datetime.datetime.now())
sys.exit(rc)
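
The recurring bytes-encoding hunks above are the substance of the Python 3 port in this file: a file opened in "w+b" accepts only bytes on Python 3, while Python 2 also accepted str. A minimal standalone sketch of the issue (the path is hypothetical):

    # Works on both Python 2 and 3; a bare fd.write("0") raises
    # TypeError on Python 3 because the file is in binary mode.
    with open("/tmp/fd_writes_demo", "w+b") as fd:
        fd.seek(1024 - 1)              # pre-size the file, as fd_write_file() does
        fd.write("0".encode("utf-8"))  # always hand a binary-mode file bytes
        fd.flush()

The commit's bytes(str(...).encode("utf-8")) wrapping is equivalent; the extra str() and bytes() calls are redundant but harmless on both interpreters.
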
diff --git a/glustolibs-io/shared_files/scripts/file_dir_ops.py b/glustolibs-io/shared_files/scripts/file_dir_ops.py
index 96e53262d..908a48c8e 100755
--- a/glustolibs-io/shared_files/scripts/file_dir_ops.py
+++ b/glustolibs-io/shared_files/scripts/file_dir_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2019 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,18 +20,22 @@
"""
from __future__ import print_function
-import os
import argparse
-import sys
-import random
-import string
+import contextlib
import datetime
from multiprocessing import Process
-import subprocess
-from docx import Document
-import contextlib
+from multiprocessing.pool import ThreadPool
+import os
import platform
+import random
import shutil
+import string
+import subprocess
+import sys
+
+from docx import Document
+import numpy as np
+from sh import rsync as sh_rsync
if platform.system() == "Windows":
path_sep = "\\"
@@ -49,9 +53,9 @@ def is_root(path):
True if path is '/' , False otherwise
"""
if os.path.realpath(os.path.abspath(path)) == '/':
- print ("Directory '%s' is the root of filesystem. "
- "Not performing any operations on the root of filesystem" %
- os.path.abspath(path))
+ print("Directory '%s' is the root of filesystem. "
+ "Not performing any operations on the root of filesystem" % (
+ os.path.abspath(path)))
return True
else:
return False
@@ -106,7 +110,7 @@ def create_dir(dir_path):
try:
os.makedirs(dir_abs_path)
except (OSError, IOError):
- print ("Unable to create dir: %s" % dir_abs_path)
+ print("Unable to create dir: %s" % dir_abs_path)
return 1
return 0
@@ -138,16 +142,16 @@ def create_dirs(dir_path, depth, num_of_dirs, num_of_files=0,
base_file_name, file_types)
except (OSError, IOError) as e:
if 'File exists' not in e.strerror:
- print ("Unable to create dir '%s' : %s"
- % (dir_path, e.strerror))
+ print("Unable to create dir '%s' : %s" % (
+ dir_path, e.strerror))
with open("/tmp/file_dir_ops_create_dirs_rc", "w") as fd:
try:
fd.write("1")
fd.flush()
fd.close()
- except IOError as e:
- print ("Unable to write the rc to the "
- "/tmp/file_dir_ops_create_dirs_rc file")
+ except IOError:
+ print("Unable to write the rc to the "
+ "/tmp/file_dir_ops_create_dirs_rc file")
if depth == 0:
return 0
for i in range(num_of_dirs):
@@ -183,9 +187,10 @@ def create_deep_dirs(args):
for i in range(dirname_start_num, (dirname_start_num + dir_length)):
num_of_dirs = random.choice(range(1, max_num_of_dirs + 1))
process_dir_path = os.path.join(dir_path, "user%d" % i)
- process_list.append(Process(target=create_dirs,
- args=(process_dir_path, dir_depth,
- num_of_dirs)))
+ process_list.append(Process(
+ target=create_dirs,
+ args=(process_dir_path, dir_depth, num_of_dirs)
+ ))
for each_process in process_list:
each_process.start()
@@ -237,11 +242,11 @@ def create_deep_dirs_with_files(args):
for i in range(dirname_start_num, (dirname_start_num + dir_length)):
num_of_dirs = random.choice(range(1, max_num_of_dirs + 1))
process_dir_path = os.path.join(dir_path, "user%d" % i)
- process_list.append(Process(target=create_dirs,
- args=(process_dir_path, dir_depth,
- num_of_dirs, num_of_files,
- fixed_file_size, base_file_name,
- file_types)))
+ process_list.append(Process(
+ target=create_dirs,
+ args=(process_dir_path, dir_depth, num_of_dirs, num_of_files,
+ fixed_file_size, base_file_name, file_types)
+ ))
for each_process in process_list:
each_process.start()
@@ -256,6 +261,48 @@ def create_deep_dirs_with_files(args):
return int(rc)
+def _create_file(file_abs_path, file_type, file_size):
+ rc = 0
+
+ if file_type == 'txt':
+ file_abs_path += ".txt"
+
+ with open(file_abs_path, "w+") as new_file:
+ try:
+ new_file.write(''.join(
+ np.random.choice(list(string.printable), file_size)))
+ new_file.flush()
+ new_file.close()
+ except IOError as err:
+ print("Unable to write to file '%s' : %s" % (
+ file_abs_path, err.strerror))
+ rc = 1
+
+ elif file_type == 'docx':
+ file_abs_path += ".docx"
+ try:
+ document = Document()
+ str_to_write = list(string.ascii_letters + string.digits)
+ file_str = ''.join(np.random.choice(str_to_write, file_size))
+ document.add_paragraph(file_str)
+ document.save(file_abs_path)
+ except Exception as err:
+ print("Unable to write to file '%s' : %s" % (
+ file_abs_path, err.strerror))
+ rc = 1
+
+ elif file_type == 'empty_file':
+ try:
+ with open(file_abs_path, "w+") as new_file:
+ new_file.close()
+ except IOError as err:
+ print("Unable to write to file '%s' : %s" % (
+ file_abs_path, err.strerror))
+ rc = 1
+
+ return rc
+
+
def _create_files(dir_path, num_of_files, fixed_file_size=None,
base_file_name='testfile', file_types='txt'):
rc = 0
@@ -264,62 +311,38 @@ def _create_files(dir_path, num_of_files, fixed_file_size=None,
'1k': 1024,
'10k': 10240,
'512k': 524288,
- '1M': 1048576
- }
+ '1M': 1048576,
+ }
# Create dir_path
rc = create_dir(dir_path)
if rc != 0:
return rc
- for count in range(num_of_files):
- fname = base_file_name + str(count)
- fname_abs_path = os.path.join(dir_path, fname)
- if fixed_file_size is None:
- file_size = (
- file_sizes_dict[random.choice(list(file_sizes_dict.keys()))])
- else:
- try:
- file_size = file_sizes_dict[fixed_file_size]
- except KeyError as e:
- print ("File sizes can be [1k, 10k, 512k, 1M]")
- return 1
+ fname_abs_path = os.path.join(dir_path, base_file_name)
+ if fixed_file_size is None:
+ # this generator yields file tuples: (file name, file type, file size)
+ files = ((fname_abs_path + str(num),
+ random.choice(file_types_list),
+ random.choice(list(file_sizes_dict.values())))
+ for num in range(num_of_files))
+ else:
+ try:
+ files = ((fname_abs_path + str(num),
+ random.choice(file_types_list),
+ file_sizes_dict[fixed_file_size])
+ for num in range(num_of_files))
+ except KeyError:
+ print("File sizes can be [1k, 10k, 512k, 1M]")
+ return 1
- type = random.choice(file_types_list)
- if type == 'txt':
- fname_abs_path = fname_abs_path + ".txt"
+ # Thread per filetype (for now)
+ pool = ThreadPool(len(file_types_list))
+ ret = pool.map(lambda file_tuple: _create_file(*file_tuple), files)
+ pool.close()
+ pool.join()
+ rc = 1 if any(ret) else 0
- with open(fname_abs_path, "w+") as fd:
- try:
- fd.write(''.join(random.choice(string.printable) for x in
- range(file_size)))
- fd.flush()
- fd.close()
- except IOError as e:
- print ("Unable to write to file '%s' : %s" %
- (fname_abs_path, e.strerror))
- rc = 1
- elif type == 'docx':
- fname_abs_path = fname_abs_path + ".docx"
- try:
- document = Document()
- str_to_write = string.ascii_letters + string.digits
- file_str = (''.join(random.choice(str_to_write)
- for x in range(file_size)))
- document.add_paragraph(file_str)
- document.save(fname_abs_path)
- except Exception as e:
- print ("Unable to write to file '%s' : %s" %
- (fname_abs_path, e.strerror))
- rc = 1
- elif type == 'empty_file':
- try:
- with open(fname_abs_path, "w+") as fd:
- fd.close()
- except IOError as e:
- print ("Unable to write to file '%s' : %s" %
- (fname_abs_path, e.strerror))
- rc = 1
return rc
@@ -367,7 +390,7 @@ def rename(args):
# Check if dir_path exists
if not path_exists(dir_path):
- print ("Directory '%s' does not exist" % dir_path)
+ print("Directory '%s' does not exist" % dir_path)
return 1
rc = 0
@@ -381,7 +404,7 @@ def rename(args):
os.rename(old, new)
except OSError:
rc = 1
- print ("Unable to rename %s -> %s" % (old, new))
+ print("Unable to rename %s -> %s" % (old, new))
# rename dirs
if dirName != dir_path:
@@ -391,19 +414,18 @@ def rename(args):
os.rename(old, new)
except OSError:
rc = 1
- print ("Unable to rename %s -> %s" % (old, new))
+ print("Unable to rename %s -> %s" % (old, new))
return rc
def ls(args):
- """Recursively list all the files/dirs under 'dir'
- """
+ """Recursively list all the files/dirs under 'dir'."""
dir_path = os.path.abspath(args.dir)
log_file_name = args.log_file_name
# Check if dir_path exists
if not path_exists(dir_path):
- print ("Directory '%s' does not exist" % dir_path)
+ print("Directory '%s' does not exist" % dir_path)
return 1
with open_file_to_write(log_file_name) as file_handle:
@@ -423,12 +445,10 @@ def ls(args):
def _get_path_stats(path):
- """Get the stat of a specified path.
- """
+ """Get the stat of a specified path."""
rc = 0
path = os.path.abspath(args.path)
file_stats = {}
- file_stats = {}
if platform.system() == "Linux":
cmd = "stat -c " + "'%A %U %G' " + path
@@ -455,8 +475,8 @@ def _get_path_stats(path):
'mtime': stat.st_mtime,
'ctime': stat.st_ctime,
'inode': stat.st_ino,
- 'stat': stat
- })
+ 'stat': stat,
+ })
except Exception:
rc = 1
err = "Unable to get the stat of path %s" % path
@@ -465,41 +485,39 @@ def _get_path_stats(path):
def get_path_stats(args):
- """Get file/dir Stat
- """
+ """Get file/dir Stat."""
path = os.path.abspath(args.path)
recursive = args.recursive
log_file_name = args.log_file_name
# Check if dir_path exists
if not path_exists(path):
- print ("PATH '%s' does not exist" % path)
+ print("PATH '%s' does not exist" % path)
return 1
file_stats = {}
if os.path.isfile(path):
- file_stats[path] = (_get_path_stats(path))
+ file_stats[path] = _get_path_stats(path)
if os.path.isdir(path):
if recursive:
for dirName, subdirList, fileList in os.walk(path, topdown=False):
- file_stats[dirName] = (_get_path_stats(dirName))
+ file_stats[dirName] = _get_path_stats(dirName)
for fname in fileList:
fname_abs_path = os.path.join(dirName, fname)
- file_stats[fname_abs_path] = (_get_path_stats(
- fname_abs_path))
+ file_stats[fname_abs_path] = _get_path_stats(
+ fname_abs_path)
else:
- file_stats[path] = (_get_path_stats(path))
+ file_stats[path] = _get_path_stats(path)
rc = 0
with open_file_to_write(log_file_name) as file_handle:
if log_file_name:
time_str = _get_current_time()
- file_handle.write("Starting 'stat %s' : %s" % (
- path, time_str))
+ file_handle.write("Starting 'stat %s' : %s" % (path, time_str))
for key in file_stats.keys():
file_handle.write("\nFile: %s" % key)
ret, file_stat, err = file_stats[key]
@@ -510,8 +528,7 @@ def get_path_stats(args):
file_handle.write("\t%s\n" % file_stat)
if log_file_name:
time_str = _get_current_time()
- file_handle.write("Ending 'stat %s' : %s" % (
- path, time_str))
+ file_handle.write("Ending 'stat %s' : %s" % (path, time_str))
file_handle.write("\n")
return rc
@@ -531,7 +548,7 @@ def compress(args):
# Check if dir_path exists
if not path_exists(dir_path):
- print ("Directory '%s' does not exist" % dir_path)
+ print("Directory '%s' does not exist" % dir_path)
return 1
# Create dir_path
@@ -546,16 +563,16 @@ def compress(args):
proc_list = []
for each_dir in dirs:
if compress_type == '7z':
- file_name = (dest_dir + path_sep +
- os.path.basename(each_dir) + "_7z.7z")
+ file_name = dest_dir + path_sep + os.path.basename(
+ each_dir) + "_7z.7z"
cmd = "7z a -t7z " + file_name + " " + each_dir
elif compress_type == 'gzip':
- tmp_file_name = (dir_path + path_sep +
- os.path.basename(each_dir) + "_tar.tar")
- file_name = (dest_dir + path_sep +
- os.path.basename(each_dir) + "_tgz.tgz")
- cmd = ("7z a -ttar -so " + tmp_file_name + " " +
- each_dir + " | 7z a -si " + file_name)
+ tmp_file_name = dir_path + path_sep + os.path.basename(
+ each_dir) + "_tar.tar"
+ file_name = dest_dir + path_sep + os.path.basename(
+ each_dir) + "_tgz.tgz"
+ cmd = ("7z a -ttar -so " + tmp_file_name + " "
+ + each_dir + " | 7z a -si " + file_name)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
proc_list.append(proc)
@@ -570,12 +587,12 @@ def compress(args):
file_name = dest_dir + path_sep + os.path.basename(dir_path) + "_7z.7z"
cmd = "7z a -t7z " + file_name + " " + dir_path
elif compress_type == 'gzip':
- tmp_file_name = (dest_dir + path_sep + os.path.basename(dir_path) +
- "_tar.tar")
- file_name = (dest_dir + path_sep + os.path.basename(dir_path) +
- "_tgz.tgz")
- cmd = ("7z a -ttar -so " + tmp_file_name + " " + dir_path +
- " | 7z a -si " + file_name)
+ tmp_file_name = (dest_dir + path_sep + os.path.basename(dir_path)
+ + "_tar.tar")
+ file_name = dest_dir + path_sep + os.path.basename(
+ dir_path) + "_tgz.tgz"
+ cmd = ("7z a -ttar -so " + tmp_file_name + " " + dir_path
+ + " | 7z a -si " + file_name)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
proc.communicate()
@@ -587,13 +604,12 @@ def compress(args):
def uncompress(args):
- """UnCompress the given compressed file
- """
+ """UnCompress the given compressed file."""
compressed_file = os.path.abspath(args.compressed_file)
dest_dir = args.dest_dir
date_time = datetime.datetime.now().strftime("%I_%M%p_%B_%d_%Y")
- cmd = ("7z x " + compressed_file + " -o" + dest_dir + path_sep +
- "uncompress_" + date_time + " -y")
+ cmd = ("7z x " + compressed_file + " -o" + dest_dir + path_sep
+ + "uncompress_" + date_time + " -y")
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
proc.communicate()
@@ -605,13 +621,12 @@ def uncompress(args):
def uncompress_dir(args):
- """UnCompress all compressed files in destination directory
- """
+ """UnCompress all compressed files in destination directory."""
dir_path = os.path.abspath(args.dir)
dest_dir = args.dest_dir
date_time = datetime.datetime.now().strftime("%I_%M%p_%B_%d_%Y")
- cmd = ("7z x " + dir_path + " -o" + dest_dir + path_sep +
- "uncompress_" + date_time + " -y")
+ cmd = ("7z x " + dir_path + " -o" + dest_dir + path_sep
+ + "uncompress_" + date_time + " -y")
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
proc.communicate()
@@ -623,7 +638,7 @@ def uncompress_dir(args):
def create_hard_links(args):
- """Creates hard link"""
+ """Create hard link."""
src_dir = os.path.abspath(args.src_dir)
dest_dir = args.dest_dir
@@ -633,7 +648,7 @@ def create_hard_links(args):
# Check if src_dir exists
if not path_exists(src_dir):
- print ("Directory '%s' does not exist" % src_dir)
+ print("Directory '%s' does not exist" % src_dir)
return 1
# Create dir_path
@@ -650,8 +665,8 @@ def create_hard_links(args):
rc = create_dir(dest_dir + path_sep + tmp_dir)
if rc != 0:
rc = 1
- link_file = (dest_dir + path_sep + tmp_dir + path_sep +
- new_fname + "_h")
+ link_file = (dest_dir + path_sep + tmp_dir + path_sep
+ + new_fname + "_h")
target_file = os.path.join(dir_name, fname)
if platform.system() == "Windows":
cmd = "mklink /H " + link_file + " " + target_file
@@ -702,9 +717,7 @@ def read(args):
def copy(args):
- """
- Copies files/dirs under 'dir' to destination directory
- """
+ """Copy files/dirs under 'dir' to destination directory."""
src_dir = os.path.abspath(args.src_dir)
dest_dir = args.dest_dir
@@ -714,7 +727,7 @@ def copy(args):
# Check if src_dir exists
if not path_exists(src_dir):
- print ("Directory '%s' does not exist" % src_dir)
+ print("Directory '%s' does not exist" % src_dir)
return 1
# Create dest_dir
@@ -735,8 +748,8 @@ def copy(args):
if dir_name != src_dir:
try:
src = dir_name
- dst = (dest_dir + path_sep +
- os.path.basename(os.path.normpath(src)))
+ dst = (dest_dir + path_sep
+ + os.path.basename(os.path.normpath(src)))
shutil.copytree(src, dst)
except OSError:
rc = 1
@@ -744,9 +757,7 @@ def copy(args):
def delete(args):
- """
- Deletes files/dirs under 'dir'
- """
+ """Delete files/dirs under 'dir'."""
dir_path = os.path.abspath(args.dir)
# Check if dir_path is '/'
@@ -755,7 +766,7 @@ def delete(args):
# Check if dir_path exists
if not path_exists(dir_path):
- print ("Directory '%s' does not exist" % dir_path)
+ print("Directory '%s' does not exist" % dir_path)
return 1
rc = 0
@@ -774,8 +785,137 @@ def delete(args):
return rc
+sizes_dict = {
+ '1k': 1024,
+ '10k': 10240,
+ '512k': 524288,
+ '1M': 1048576,
+ '0.5k': 512
+}
+
+
+def append(args):
+ """
+ Appends all files under 'dir' with randomly sized data.
+ """
+ dir_path = os.path.abspath(args.dir)
+ if not path_exists(args.dir):
+ return 1
+ rc = 0
+
+ for dir_name, subdir_list, file_list in os.walk(dir_path, topdown=False):
+ for fname in file_list:
+ append_size = sizes_dict[
+ random.choice(list(sizes_dict.keys()))]
+ try:
+ file = os.path.join(dir_name, fname)
+ with open(file, "a") as fd:
+ try:
+ fd.write(''.join(random.choice(string.printable)
+ for x in range(append_size)))
+ fd.flush()
+ except IOError as e:
+ print("Unable to append to file '%s' : %s" %
+ (file, e.strerror))
+ rc = 1
+
+ except OSError:
+ rc = 1
+
+ return rc
+
+
+def overwrite(args):
+ """
+ Truncates everything present and overwrites the file with new data.
+ """
+ dir_path = os.path.abspath(args.dir)
+ if not path_exists(args.dir):
+ return 1
+ rc = 0
+
+ for dir_name, subdir_list, file_list in os.walk(dir_path, topdown=False):
+ for fname in file_list:
+ new_size = sizes_dict[
+ random.choice(list(sizes_dict.keys()))]
+ try:
+ file = os.path.join(dir_name, fname)
+ with open(file, "w+") as fd:
+ try:
+ fd.write(''.join(random.choice(string.printable)
+ for x in range(new_size)))
+ fd.flush()
+ except IOError as e:
+ print("Unable to write to file '%s' : %s" %
+ (file, e.strerror))
+ rc = 1
+ except OSError:
+ rc = 1
+ return rc
+
+
+def truncate(args):
+ """
+ Truncates files to a certain size calculated randomly.
+ """
+ dir_path = os.path.abspath(args.dir)
+ if not path_exists(args.dir):
+ return 1
+ rc = 0
+
+ for dir_name, subdir_list, file_list in os.walk(dir_path, topdown=False):
+ for fname in file_list:
+ try:
+ file = os.path.join(dir_name, fname)
+ with open(file, "a+") as fd:
+ try:
+ fsize = os.path.getsize(file)
+ new_size = random.randrange(
+ 0, fsize//random.choice([2, 3, 4, 5]))
+ fd.truncate(new_size)
+
+ except IOError as e:
+ print("Unable to truncate file '%s' : %s" %
+ (file, e.strerror))
+ rc = 1
+ except OSError:
+ rc = 1
+ return rc
+
+
+def rsync(args):
+ """
+ rsync files from source to destination.
+ """
+ src_dir = os.path.abspath(args.src_dir)
+ remote_dir = args.remote_dir
+
+ if platform.system() == "Windows":
+ print("rsync not supported on Windows,Exiting!")
+ return 1
+
+ # Check if src_dir exists
+ if not path_exists(src_dir):
+ print("Directory '%s' does not exist" % src_dir)
+ return 1
+
+ # Create dest_dir
+ rc = create_dir(remote_dir)
+ if rc != 0:
+ return rc
+ rc = 0
+
+ try:
+ sh_rsync("-r", remote_dir, src_dir)
+
+ except Exception as e:
+ print("Can't rsync! : %s" % e.strerror)
+ rc = 1
+ return rc
+
+
if __name__ == "__main__":
- print ("Starting File/Dir Ops: %s" % _get_current_time())
+ print("Starting File/Dir Ops: %s" % _get_current_time())
test_start_time = datetime.datetime.now().replace(microsecond=0)
parser = argparse.ArgumentParser(
@@ -1019,7 +1159,66 @@ if __name__ == "__main__":
help="Directory on which operations has to be performed")
read_parser.set_defaults(func=read)
- # copy all files/directories under dir
+ # Appends files under dir
+ append_parser = subparsers.add_parser(
+ 'append',
+ help=("Appends data to already created files. "),
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ append_parser.add_argument(
+ '--log-file', help="Output log filename to log the "
+ "contents of file",
+ metavar=('log_file'), dest='log_file',
+ type=str, default=default_log_file)
+ append_parser.add_argument(
+ 'dir', metavar='DIR', type=str,
+ help="Directory on which operations has to be performed")
+ append_parser.set_defaults(func=append)
+
+ # Overwrites files under dir
+ overwrite_parser = subparsers.add_parser(
+ 'overwrite',
+ help=("Overwrites existing files with new data "),
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ overwrite_parser.add_argument(
+ '--log-file', help="Output log filename to log the "
+ "contents of file",
+ metavar=('log_file'), dest='log_file',
+ type=str, default=default_log_file)
+ overwrite_parser.add_argument(
+ 'dir', metavar='DIR', type=str,
+ help="Directory on which operations has to be performed")
+ overwrite_parser.set_defaults(func=overwrite)
+
+ # rsync dir to a remote directory
+ rsyncs_parser = subparsers.add_parser(
+ 'rsync',
+ help=("Rsync all dirs in a remote location to 'dir'. "),
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ rsyncs_parser.add_argument(
+ '--remote-dir', help="Remote location to rsync from",
+ metavar=('remote_dir'), dest='remote_dir',
+ type=str)
+ rsyncs_parser.add_argument(
+ 'src_dir', metavar='src_dir', type=str,
+ help="Directory on which operations has to be performed")
+ rsyncs_parser.set_defaults(func=rsync)
+
+ # Truncates files under dir
+ truncate_parser = subparsers.add_parser(
+ 'truncate',
+ help=("Truncates existing files "),
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ truncate_parser.add_argument(
+ '--log-file', help="Output log filename to log the "
+ "contents of file",
+ metavar=('log_file'), dest='log_file',
+ type=str, default=default_log_file)
+ truncate_parser.add_argument(
+ 'dir', metavar='DIR', type=str,
+ help="Directory on which operations has to be performed")
+ truncate_parser.set_defaults(func=truncate)
+
+ # Copy all files/directories under dir
copy_parser = subparsers.add_parser(
'copy',
help=("Copy all files/directories under 'dir'. "),
@@ -1047,6 +1246,6 @@ if __name__ == "__main__":
rc = args.func(args)
test_end_time = datetime.datetime.now().replace(microsecond=0)
- print ("Execution time: %s" % (test_end_time - test_start_time))
- print ("Ending File/Dir Ops %s" % _get_current_time())
+ print("Execution time: %s" % (test_end_time - test_start_time))
+ print("Ending File/Dir Ops %s" % _get_current_time())
sys.exit(rc)
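
The largest functional change above is in _create_files(): the per-file loop is replaced by a generator of (file name, file type, file size) tuples that a ThreadPool consumes, with the per-type branching moved into the new _create_file() helper. A standalone sketch of the same fan-out pattern, with a stub worker and illustrative names and sizes:

    from multiprocessing.pool import ThreadPool

    def create_one(name, ftype, size):
        # Stand-in for _create_file(); return 0 on success, 1 on failure.
        return 0

    # Lazy generator of (file name, file type, file size) tuples.
    files = (("testfile%d" % num, "txt", 1024) for num in range(10))

    pool = ThreadPool(3)  # the script sizes this to len(file_types_list)
    ret = pool.map(lambda file_tuple: create_one(*file_tuple), files)
    pool.close()
    pool.join()
    rc = 1 if any(ret) else 0  # overall failure if any worker failed

ThreadPool (rather than multiprocessing.Pool) is what makes the lambda usable here: thread pools pass the callable by reference instead of pickling it.
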
diff --git a/glustolibs-io/shared_files/scripts/file_lock.py b/glustolibs-io/shared_files/scripts/file_lock.py
new file mode 100644
index 000000000..e29fd1b1d
--- /dev/null
+++ b/glustolibs-io/shared_files/scripts/file_lock.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from fcntl import flock, LOCK_EX, LOCK_NB, LOCK_UN
+from time import sleep
+from argparse import ArgumentParser
+
+
+def get_file_lock(args):
+ """
+ Gets the lock to a file and releases it after timeout
+ """
+ file_name = args.f
+ timeout = args.t
+ f = open(file_name, 'w')
+ flock(f.fileno(), LOCK_EX | LOCK_NB)
+ sleep(int(timeout))
+ flock(f.fileno(), LOCK_UN)
+
+
+if __name__ == "__main__":
+ file_lock_parser = ArgumentParser(
+ prog="file_lock.py", description="Program to validate file lock ops")
+
+ file_lock_req_args = file_lock_parser.add_argument_group(
+ 'required named arguments')
+ file_lock_req_args.add_argument(
+ '-f', type=str, required=True,
+ help="File on which lock has to be applied")
+ file_lock_req_args.add_argument(
+ '-t', help="time for which lock has to be retained", type=int,
+ required=True)
+
+ file_lock_parser.set_defaults(func=get_file_lock)
+
+ args = file_lock_parser.parse_args()
+ rc = args.func(args)
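
The lock taken above is exclusive and non-blocking (LOCK_EX | LOCK_NB), so a second process contending for the same file fails immediately instead of waiting. A sketch of that contention behavior, using a hypothetical lock file:

    from fcntl import flock, LOCK_EX, LOCK_NB, LOCK_UN

    f1 = open("/tmp/demo.lock", "w")
    flock(f1.fileno(), LOCK_EX | LOCK_NB)      # first holder succeeds

    f2 = open("/tmp/demo.lock", "w")           # a separate open file description
    try:
        flock(f2.fileno(), LOCK_EX | LOCK_NB)  # fails while f1 holds the lock
    except (IOError, OSError):
        print("lock already held, as expected")

    flock(f1.fileno(), LOCK_UN)                # release; a retry now succeeds

Note that get_file_lock() returns None, so the rc captured on the last line is unused; a caller detects contention through the uncaught flock exception and the resulting non-zero exit status.
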
diff --git a/glustolibs-io/shared_files/scripts/generate_io.py b/glustolibs-io/shared_files/scripts/generate_io.py
index d07bda7b0..d80389fd3 100755
--- a/glustolibs-io/shared_files/scripts/generate_io.py
+++ b/glustolibs-io/shared_files/scripts/generate_io.py
@@ -15,29 +15,29 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-import subprocess
-import re
-import time
+"""
+Script for generating IO on client
+"""
+
+from __future__ import print_function
+import argparse
+import datetime
import multiprocessing
-import tempfile
import os
+import re
import shutil
import signal
-import argparse
+import subprocess
import sys
+import tempfile
+import time
import yaml
-import datetime
-ONE_GB_BYTES = 1073741824.0
-
-"""
-Script for generating IO on client
-"""
+ONE_GB_BYTES = float(1024 ** 3)
def get_disk_usage(path):
- """
- This module gets disk usage of the given path
+ """Get disk usage of the given path.
Args:
path (str): path for which disk usage to be calculated
@@ -45,7 +45,6 @@ def get_disk_usage(path):
Returns:
dict: disk usage in dict format on success
None Type, on failure
-
"""
cmd = 'stat -f ' + path
@@ -73,30 +72,27 @@ def get_disk_usage(path):
print("Regex mismatch in get_disk_usage()")
return None
- usage_info = dict()
- keys = ['b_size', 'b_total', 'b_free', 'b_avail', 'i_total', 'i_free']
- val = list(match.groups())
- info = dict(zip(keys, val))
- usage_info['total'] = ((int(info['b_total']) * int(info['b_size'])) /
- ONE_GB_BYTES)
- usage_info['free'] = ((int(info['b_free']) * int(info['b_size'])) /
- ONE_GB_BYTES)
- usage_info['used_percent'] = (100 - (100.0 * usage_info['free'] /
- usage_info['total']))
- usage_info['total_inode'] = int(info['i_total'])
- usage_info['free_inode'] = int(info['i_free'])
- usage_info['used_percent_inode'] = ((100 -
- (100.0 * usage_info['free_inode']) /
- usage_info['total_inode']))
+ keys = ('b_size', 'b_total', 'b_free', 'b_avail', 'i_total', 'i_free')
+ values = list(match.groups())
+ data = dict(zip(keys, values))
+ usage_info = {'total': (
+ int(data['b_total']) * int(data['b_size']) // ONE_GB_BYTES)}
+ usage_info['free'] = (
+ int(data['b_free']) * int(data['b_size']) // ONE_GB_BYTES)
+ usage_info['used_percent'] = (
+ 100 - (100.0 * usage_info['free'] // usage_info['total']))
+ usage_info['total_inode'] = int(data['i_total'])
+ usage_info['free_inode'] = int(data['i_free'])
+ usage_info['used_percent_inode'] = (
+ 100 - (100.0 * usage_info['free_inode']) // usage_info['total_inode'])
usage_info['used'] = usage_info['total'] - usage_info['free']
- usage_info['used_inode'] = (usage_info['total_inode'] -
- usage_info['free_inode'])
+ usage_info['used_inode'] = (
+ usage_info['total_inode'] - usage_info['free_inode'])
return usage_info
def get_disk_used_percent(dirname):
- """
- Module to get disk used percent
+ """Get disk used percentage.
Args:
dirname (str): absolute path of directory
@@ -107,21 +103,18 @@ def get_disk_used_percent(dirname):
Example:
get_disk_used_percent("/mnt/glusterfs")
-
"""
output = get_disk_usage(dirname)
if output is None:
- print("Failed to get disk used percent for %s"
- % dirname)
+ print("Failed to get disk used percent for %s" % dirname)
return None
return output['used_percent']
def check_if_percent_to_fill_or_timeout_is_met(dirname, percent_to_fill,
timeout):
- """
- Module to check if percent to fill or timeout is met.
+ """Check if percent to fill or timeout is met.
Args:
dirname (str): absolute path of directory
@@ -133,8 +126,7 @@ def check_if_percent_to_fill_or_timeout_is_met(dirname, percent_to_fill,
is met, False otherwise
Example:
- check_if_percent_to_fill_or_timeout_is_met("/mnt/glusterfs",
- 10, 60)
+ check_if_percent_to_fill_or_timeout_is_met("/mnt/glusterfs", 10, 60)
"""
flag = 0
count = 0
@@ -145,11 +137,11 @@ def check_if_percent_to_fill_or_timeout_is_met(dirname, percent_to_fill,
if int(percent_to_fill) > int(used):
remaining_to_fill = int(percent_to_fill) - int(used)
- print("Remaining space left to fill data in directory %s is %s"
- % (dirname, str(remaining_to_fill)))
+ print("Remaining space left to fill data in directory %s is %s" % (
+ dirname, str(remaining_to_fill)))
time_str = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
- print("Directory %s used percent at time %s: %s"
- % (dirname, time_str, used))
+ print("Directory %s used percent at time %s: %s" % (
+ dirname, time_str, used))
if int(percent_to_fill) <= int(used):
flag = 1
break
@@ -157,8 +149,8 @@ def check_if_percent_to_fill_or_timeout_is_met(dirname, percent_to_fill,
count = count + 5
else:
print("Directory %s is filled with given percent already. "
- "Percentage filled: %s"
- % (dirname, str(percent_to_fill)))
+ "Percentage filled: %s" % (
+ dirname, str(percent_to_fill)))
flag = 1
break
@@ -169,19 +161,15 @@ def check_if_percent_to_fill_or_timeout_is_met(dirname, percent_to_fill,
else:
print("Timeout %s seconds reached before filling directory with "
"given percentage %s" % (str(timeout), str(percent_to_fill)))
- return True
- return False
+ return False
def run_check_if_percent_to_fill_or_timeout_is_met(dirname,
percent_to_fill,
timeout, event):
- """
- Helper Module to check if percent to fill or timeout is met.
- """
- ret = check_if_percent_to_fill_or_timeout_is_met(dirname,
- percent_to_fill,
- timeout)
+ """Check if percent to fill or timeout is met."""
+ ret = check_if_percent_to_fill_or_timeout_is_met(
+ dirname, percent_to_fill, timeout)
if ret:
event.set()
return True
@@ -189,10 +177,8 @@ def run_check_if_percent_to_fill_or_timeout_is_met(dirname,
return False
-def run_fio(proc_queue, script_path, dirname,
- job_files_list, log_file):
- """
- Module to invoke IOs using fio tool
+def run_fio(proc_queue, script_path, dirname, job_files_list, log_file):
+ """Invoke IOs using fio tool.
Args:
proc_queue (obj): multiprocessing queue object
@@ -204,7 +190,6 @@ def run_fio(proc_queue, script_path, dirname,
Returns:
bool: True, if fio starts to write data and stops when it
gets "STOP" string in queue, False otherwise
-
"""
tmpdir = tempfile.mkdtemp()
job_files_list_to_run = []
@@ -213,23 +198,17 @@ def run_fio(proc_queue, script_path, dirname,
shutil.copy(job_file, job_file_to_run)
job_files_list_to_run.append(job_file_to_run)
+ python_bin = "/usr/bin/env python%d" % sys.version_info.major
+ cmd = "%s %s --job-files '%s' %s" % (
+ python_bin, script_path, ' '.join(job_files_list_to_run), dirname)
if log_file is not None:
with open(log_file, "w") as fd:
time_str = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
- title = ("=========STARTING FIO-" + time_str +
- "=======\n")
+ title = ("=========STARTING FIO-" + time_str + "=======\n")
fd.write(title)
fd.close()
- cmd = ("python " + script_path +
- " --job-files '" + ' '.join(job_files_list_to_run) + "' " +
- dirname + " >> " + log_file + " 2>&1")
-
- else:
- cmd = ("python " + script_path +
- " --job-files '" + ' '.join(job_files_list_to_run) +
- "' " + dirname)
- p = subprocess.Popen(cmd, shell=True,
- preexec_fn=os.setsid)
+ cmd += " >> %s 2>&1" % log_file
+ p = subprocess.Popen(cmd, shell=True, preexec_fn=os.setsid)
time.sleep(10)
if p is None:
print("Unable to trigger IO using fio")
@@ -241,8 +220,7 @@ def run_fio(proc_queue, script_path, dirname,
with open(log_file, "a") as fd:
time_str = (datetime.datetime.now().
strftime('%Y_%m_%d_%H_%M_%S'))
- title = ("=========ENDING FIO-" + time_str +
- "=======\n")
+ title = ("=========ENDING FIO-" + time_str + "=======\n")
fd.write(title)
fd.close()
break
@@ -251,10 +229,8 @@ def run_fio(proc_queue, script_path, dirname,
return True
-def start_populate_data(mount_point, io_dict,
- percent_to_fill, timeout):
- """
- Starts populating data on the directory
+def start_populate_data(mount_point, io_dict, percent_to_fill, timeout):
+ """Start populating data on a directory.
Args:
mount_point(str): Directory name to fill data
@@ -264,29 +240,23 @@ def start_populate_data(mount_point, io_dict,
Returns:
bool: returns True, if IO succeeds. False, otherwise
-
"""
dirname = mount_point
m = multiprocessing.Manager()
event = m.Event()
- proc_list = []
- proc_queue = []
-
+ proc_list, proc_queue = [], []
for each_io in io_dict.keys():
q = multiprocessing.Queue()
proc_queue.append(q)
workload_type = io_dict[each_io]['workload_type']
- proc = multiprocessing.Process(target=(io_dict[each_io]
- ['function_addr']),
- args=(q,
- (io_dict[each_io]
- ['script_path']),
- dirname,
- (io_dict[each_io]['job_files']
- [workload_type]),
- io_dict[each_io]['log_file']))
+ proc = multiprocessing.Process(
+ target=io_dict[each_io]['function_addr'],
+ args=(q, io_dict[each_io]['script_path'], dirname,
+ io_dict[each_io]['job_files'][workload_type],
+ io_dict[each_io]['log_file'])
+ )
proc_list.append(proc)
time.sleep(5)
proc.start()
@@ -304,8 +274,7 @@ def start_populate_data(mount_point, io_dict,
def stop_populate_data(proc_list, proc_queue, mevent=None):
- """
- Stops populating data on the directory
+ """Stop populating data on a directory.
Args:
proc_list (list): List of processes to kill
@@ -338,27 +307,22 @@ def stop_populate_data(proc_list, proc_queue, mevent=None):
proc.terminate()
return True
except Exception as e:
- print("Exception occurred in stop_populate_data(): %s"
- % e)
+ print("Exception occurred in stop_populate_data(): %s" % e)
return False
def call_get_disk_usage(args):
- """
- Main method for getting disk usage
- """
+ """Main method for getting disk usage."""
disk_usage = get_disk_usage(args.dir)
if disk_usage is None:
return 1
- print disk_usage
+ print(disk_usage)
return 0
def call_start_populate_data(args):
- """
- Main method for populating data
- """
+ """Main method for populating data."""
dirname = args.dir
config_file_list = args.c.split()
@@ -386,24 +350,18 @@ def call_start_populate_data(args):
# case4: If -i | -w | -i and -w is not specified , run all the tools
# specified in the config file
- if args.i is not None:
- io_list = args.i.split()
- else:
- io_list = []
-
+ io_list = [] if args.i is None else args.i.split()
workload_type = ""
if workload is not None:
- if (('workload' in config_data['io'] and
- config_data['io']['workload'] and
- workload in config_data['io']['workload'])):
+ if workload in (config_data['io'].get('workload', []) or []):
if not io_list:
io_list = config_data['io']['workload'][workload]
else:
io_list_from_user = io_list
- io_list_for_given_workload = (config_data['io']
- ['workload'][workload])
- io_list = (list(set(io_list_from_user).
- intersection(io_list_for_given_workload)))
+ io_list_for_given_workload = (
+ config_data['io']['workload'][workload])
+ io_list = (list(set(io_list_from_user).intersection(
+ io_list_for_given_workload)))
workload_type = workload
else:
if not io_list:
@@ -425,41 +383,41 @@ def call_start_populate_data(args):
time_str = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
log_file = filename + "_" + time_str + file_ext
- print "GENERATE IO Log file: %s" % log_file
+ print("GENERATE IO Log file: %s" % log_file)
- if('io' in config_data and 'tools' in config_data['io']):
+ if 'io' in config_data and 'tools' in config_data['io']:
config_data_io = dict(config_data['io']['tools'])
else:
- print "io tools info is not given in config file"
+ print("io tools info is not given in config file")
return 1
- if('io' in config_data and 'scripts' in config_data['io']):
+ if 'io' in config_data and 'scripts' in config_data['io']:
config_data_io.update(config_data['io']['scripts'])
else:
- print "io scripts info is not given in config file"
+ print("io scripts info is not given in config file")
return 1
io_details = {}
for io in io_list:
if io in config_data_io.keys():
config_data_io[io]['function_addr'] = eval("run_" + io)
- config_data_io[io]['log_file'] = (log_file_dir + "/" +
- io + "_log.log")
+ config_data_io[io]['log_file'] = (
+ log_file_dir + "/" + io + "_log.log")
config_data_io[io]['workload_type'] = workload_type
io_details[io] = config_data_io[io]
else:
- print ("The IO tool/script - '%s' details not present in config "
- "file. Skipping the IO - '%s'" % (io, io))
+ print("The IO tool/script - '%s' details not present in config "
+ "file. Skipping the IO - '%s'" % (io, io))
if not io_details:
- print "Config file doesn't have IO details for %s" % ','.join(io_list)
+ print("Config file doesn't have IO details for %s" % ','.join(io_list))
return 1
# Starts generating IO
# If -t and -p bot are passed as options, runs all the io's as specified
# until '-t' or '-p' is reached. i.e which ever reaches first.
ret = start_populate_data(dirname, io_details, percent, timeout)
- print "Disk Usage Details of %s: %s" % (dirname, get_disk_usage(dirname))
+ print("Disk Usage Details of %s: %s" % (dirname, get_disk_usage(dirname)))
fd_list = []
for io in io_details.keys():
@@ -472,8 +430,8 @@ def call_start_populate_data(args):
for each_fh in fd_list:
fd.write(each_fh.read())
each_fh.close()
- fd.write("\nDisk Usage Details of %s: %s" % (dirname,
- get_disk_usage(dirname)))
+ fd.write("\nDisk Usage Details of %s: %s" % (
+ dirname, get_disk_usage(dirname)))
fd.close()
if ret:
@@ -483,39 +441,35 @@ def call_start_populate_data(args):
if __name__ == "__main__":
- print "Starting IO Generation..."
+ print("Starting IO Generation...")
test_start_time = datetime.datetime.now().replace(microsecond=0)
- write_data_parser = argparse.ArgumentParser(prog="generate_io.py",
- description=("Program for "
- "generating io"))
+ write_data_parser = argparse.ArgumentParser(
+ prog="generate_io.py", description="Program for generating io")
write_data_required_parser = write_data_parser.add_argument_group(
- 'required named arguments')
-
+ 'required named arguments')
write_data_required_parser.add_argument(
'dir', metavar='DIR', type=str,
help="Directory on which operations has to be performed")
- write_data_required_parser.add_argument('-c', help="space separated list "
- "of config files",
- required=True)
- write_data_parser.add_argument('-i', help="space separated list of "
- "io tools")
+ write_data_required_parser.add_argument(
+ '-c', help="space separated list of config files", required=True)
+ write_data_parser.add_argument(
+ '-i', help="space separated list of io tools")
write_data_parser.add_argument('-w', help="Workload type")
- write_data_parser.add_argument('-p', help="percentage to fill the"
- "directory",
- type=int, default=100)
- write_data_parser.add_argument('-t', help="timeout value in seconds.",
- type=int)
+ write_data_parser.add_argument(
+ '-p', help="percentage to fill the directory", type=int, default=100)
+ write_data_parser.add_argument(
+ '-t', help="timeout value in seconds.", type=int)
default_log_file = "/var/tmp/generate_io/generate_io.log"
- write_data_parser.add_argument('-l', help="log file name.",
- default=default_log_file)
+ write_data_parser.add_argument(
+ '-l', help="log file name.", default=default_log_file)
write_data_parser.set_defaults(func=call_start_populate_data)
args = write_data_parser.parse_args()
rc = args.func(args)
test_end_time = datetime.datetime.now().replace(microsecond=0)
- print "Execution time: %s" % (test_end_time - test_start_time)
- print "Ending IO Generation"
+ print("Execution time: %s" % (test_end_time - test_start_time))
+ print("Ending IO Generation")
sys.exit(rc)
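
get_disk_usage() above converts the block counts parsed from `stat -f` into GB figures. The same arithmetic can be sketched with os.statvfs, which avoids parsing command output (an illustrative alternative, not what generate_io.py does):

    import os

    ONE_GB_BYTES = float(1024 ** 3)

    st = os.statvfs("/tmp")                            # any mounted path
    total = st.f_blocks * st.f_frsize / ONE_GB_BYTES   # size in GB
    free = st.f_bfree * st.f_frsize / ONE_GB_BYTES
    used_percent = 100 - (100.0 * free / total)
    print({"total": total, "free": free, "used_percent": used_percent})

One behavioral detail of the hunks above: they switch / to //, and with float operands // still returns a float but floors it, so the reported usage percentages lose their fractional part.
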
diff --git a/glustolibs-io/shared_files/scripts/memory_and_cpu_logger.py b/glustolibs-io/shared_files/scripts/memory_and_cpu_logger.py
new file mode 100644
index 000000000..d2ee80d6c
--- /dev/null
+++ b/glustolibs-io/shared_files/scripts/memory_and_cpu_logger.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+A tool to monitor and log memory and CPU consumption of processes.
+"""
+from __future__ import print_function
+
+import argparse
+import csv
+from time import sleep
+import subprocess
+
+
+def run_command(cmd):
+ """
+ Run command using Popen and return output
+ """
+ ret = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, shell=True)
+ output = ret.stdout.read().decode('utf8').split('\n')[:-1]
+ return output
+
+
+def get_memory_and_cpu_consumption(proc_name):
+ """
+ Get the memory and cpu consumed by a given process
+ """
+ # The command gives an output as shown below:
+ # [2020-08-07 09:34:48] 16422 0.0 9.99609
+ #
+ # Where,
+ # [2020-08-07 09:34:48] is UTC timestamp.
+ # 16422 is the process ID.
+ # 0.0 is the CPU usage.
+ # 9.99609 is memory consumption in MB.
+ cmd = ("ps u -p `pgrep " + proc_name + "` | "
+ "awk 'NR>1 && $11~/" + proc_name + "$/{print "
+ "strftime(\"[%Y-%d-%m %H:%M:%S]\", "
+ "systime(), 1), $2,$3,$6/1024}'")
+ memory_and_cpu_consumed = run_command(cmd)
+ return memory_and_cpu_consumed
+
+
+def main():
+ """
+ Main function of the tool.
+ """
+ # Setting up command line arguments
+ parser = argparse.ArgumentParser(
+ description="A tool to log memory usage of a given process"
+ )
+ parser.add_argument(
+ "-p", "--process_name", type=str, dest="process_name", required=True,
+ help="Name of process for which cpu and memory is to be logged")
+ parser.add_argument(
+ "-i", "--interval", type=int, dest="interval", default=60,
+ help="Time interval to wait between consecutive logs(Default:60)")
+ parser.add_argument(
+ "-c", "--count", type=int, dest="count", default=10,
+ help="Number of times memory and CPU has to be logged (Default:10)")
+ parser.add_argument(
+ '-t', '--testname', type=str, dest="testname", required=True,
+ help="Test name for which memory is logged")
+ args = parser.parse_args()
+
+ # Declare all three parameters
+ process_name = args.process_name
+ count = args.count
+ interval = args.interval
+
+ # Generating CSV file header
+ with open('{}.csv'.format(process_name), 'a') as file:
+ csv_writer_obj = csv.writer(file)
+ csv_writer_obj.writerow([args.testname, '', '', ''])
+ csv_writer_obj.writerow([
+ 'Time stamp', 'Process ID', 'CPU Usage', 'Memory Usage'])
+
+ # Taking memory output for a given
+ # number of times
+ for counter in range(0, count):
+ print("Iteration: {}".format(counter))
+ data = get_memory_and_cpu_consumption(process_name)
+
+ # Logging information to csv file
+ for line in data:
+ info = line.split(" ")
+ csv_writer_obj.writerow([" ".join(info[:2]), info[2],
+ info[3], info[4]])
+ sleep(interval)
+
+
+if __name__ == "__main__":
+ main()
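
main() splits each line coming back from the ps/awk pipeline on single spaces and rejoins the first two fields to rebuild the timestamp. A parsing sketch against the sample line documented in get_memory_and_cpu_consumption() (the line is the comment's example, not live ps output):

    line = "[2020-08-07 09:34:48] 16422 0.0 9.99609"
    info = line.split(" ")
    row = [" ".join(info[:2]), info[2], info[3], info[4]]
    print(row)
    # -> ['[2020-08-07 09:34:48]', '16422', '0.0', '9.99609']

Each such row lands in <process_name>.csv under the 'Time stamp', 'Process ID', 'CPU Usage', 'Memory Usage' header written before the loop.
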