96 files changed, 5020 insertions, 1620 deletions
diff --git a/arequal/README b/arequal/README new file mode 100644 index 0000000..8c9d952 --- /dev/null +++ b/arequal/README @@ -0,0 +1,77 @@ +arequal - Tool to test data security of GlusterFS +======= + +0. Install arequal +------------------ + sh# ./autogen.sh + sh# ./configure + sh# make + sh# make install + + Perform the above installation steps on both clients and servers + + +1. Generate data set +-------------------- + Create a data set to be used for the test. This data set should +have file sizes and file count similar to the data set to be used +in production. You could also use existing data (like /usr) as +your dataset as it will not be modified. This document will use +/usr as the example source directory. + + +2. Mount GlusterFS +------------------ + Install, configure and start glusterfs servers and client. If +the Replicate module is loaded, this tool can be used to perform +data consistency check among the replicas. This document will +use /mnt/glusterfs as the example mount point. + + +3. Start the test +----------------- + sh# arequal-run.sh /usr/ /mnt/gluster/usr + + +4. Verify the output +-------------------- + The tool outputs two sets of checksums one after another on +the standard output. Verify that all the values match against +each other. This ensures that the data has been copied over +properly into the GlusterFS mountpoint. + + +5. Extensive Replicate testing +------------------------------ + The rest of the document is for testing the high availability +and healing features of Replicate. + + +6. High availability testing +---------------------------- + Restart step 3. While the script is in progress, kill one of the +servers. Let the script continue to completion. The script should +not fail because of one of the server getting killed. The checksums +should still match. + + +7. Consistency testing +---------------------- + After step 3, run the following command on both the servers + + sh# arequal-checksum /export/directory + + The output values should match + + +8. Recovery testing +------------------- + If step 7 is performed after step 6, the output values will not +match since changes performed when one of the servers was down has +not propagated to the backend. + + Bring back the server up again. On the same mountpoint, run an +ls -lR to force an access to all the files on the files involved. + + Now calculate the checksums on both the backends as described +in step 7. The output values should match. diff --git a/arequal/arequal-checksum.c b/arequal/arequal-checksum.c new file mode 100644 index 0000000..107c0df --- /dev/null +++ b/arequal/arequal-checksum.c @@ -0,0 +1,611 @@ +/* + Copyright (c) 2006-2011 Gluster, Inc. <http://www.gluster.com/> + This file is part of GlusterFS. + + GlusterFS is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3 of the License, + or (at your option) any later version. + + GlusterFS is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see + <http://www.gnu.org/licenses/>. 
+*/ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#define _XOPEN_SOURCE 600 + +#include <ftw.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> +#include <errno.h> +#include <string.h> +#include <dirent.h> +#include <stdlib.h> +#include <libgen.h> +#include <stdint.h> +#include <alloca.h> +#include <dirent.h> +#include <argp.h> + + +int debug = 0; + +typedef struct { + char test_directory[4096]; + char **ignored_directory; + unsigned int directories_ignored; +} arequal_config_t; + +static arequal_config_t arequal_config; + +static error_t +arequal_parse_opts (int key, char *arg, struct argp_state *_state); + +static struct argp_option arequal_options[] = { + { "ignore", 'i', "IGNORED", 0, + "entry in the given path to be ignored"}, + { "path", 'p', "PATH", 0, "path where arequal has to be run"}, + {0, 0, 0, 0, 0} +}; + +#define DBG(fmt ...) do { \ + if (debug) { \ + fprintf (stderr, "D "); \ + fprintf (stderr, fmt); \ + } \ + } while (0) + +void +add_to_list (char *arg); +void +get_absolute_path (char directory[], char *arg); + +static inline int roof(int a, int b) +{ + return ((((a)+(b)-1)/((b)?(b):1))*(b)); +} + +void +add_to_list (char *arg) +{ + char *string = NULL; + int index = 0; + + index = arequal_config.directories_ignored - 1; + string = strdup (arg); + + if (!arequal_config.ignored_directory) { + arequal_config.ignored_directory = calloc (1, sizeof (char *)); + } else + arequal_config.ignored_directory = + realloc (arequal_config.ignored_directory, + sizeof (char *) * (index+1)); + + arequal_config.ignored_directory[index] = string; +} + +static error_t +arequal_parse_opts (int key, char *arg, struct argp_state *_state) +{ + switch (key) { + case 'i': + { + arequal_config.directories_ignored++; + add_to_list (arg); + } + break; + case 'p': + { + if (arg[0] == '/') + strcpy (arequal_config.test_directory, arg); + else + get_absolute_path (arequal_config.test_directory, arg); + + if (arequal_config.test_directory + [strlen(arequal_config.test_directory) - 1] == '/') + arequal_config.test_directory + [strlen(arequal_config.test_directory) - 1] = '\0'; + } + break; + + case ARGP_KEY_NO_ARGS: + break; + case ARGP_KEY_ARG: + break; + case ARGP_KEY_END: + if (_state->argc == 1) { + argp_usage (_state); + } + + } + + return 0; +} + +void +get_absolute_path (char directory[], char *arg) +{ + char cwd[4096] = {0,}; + + if (getcwd (cwd, sizeof (cwd)) == NULL) + printf ("some error in getting cwd\n"); + + if (strcmp (arg, ".") != 0) { + if (cwd[strlen(cwd)] != '/') + cwd[strlen (cwd)] = '/'; + strcat (cwd, arg); + } + strcpy (directory, cwd); +} + +static struct argp argp = { + arequal_options, + arequal_parse_opts, + "", + "arequal - Tool which calculates the checksum of all the entries" + "present in a given directory" +}; + +/* All this runs in single thread, hence using 'global' variables */ + +unsigned long long avg_uid_file = 0; +unsigned long long avg_uid_dir = 0; +unsigned long long avg_uid_symlink = 0; +unsigned long long avg_uid_other = 0; + +unsigned long long avg_gid_file = 0; +unsigned long long avg_gid_dir = 0; +unsigned long long avg_gid_symlink = 0; +unsigned long long avg_gid_other = 0; + +unsigned long long avg_mode_file = 0; +unsigned long long avg_mode_dir = 0; +unsigned long long avg_mode_symlink = 0; +unsigned long long avg_mode_other = 0; + +unsigned long long global_ctime_checksum = 0; + + +unsigned long long count_dir = 0; +unsigned long long count_file = 0; +unsigned long long count_symlink = 0; 
+unsigned long long count_other = 0; + + +unsigned long long checksum_file1 = 0; +unsigned long long checksum_file2 = 0; +unsigned long long checksum_dir = 0; +unsigned long long checksum_symlink = 0; +unsigned long long checksum_other = 0; + + +unsigned long long +checksum_path (const char *path) +{ + unsigned long long csum = 0; + unsigned long long *nums = 0; + int len = 0; + int cnt = 0; + + len = roof (strlen (path), sizeof (csum)); + cnt = len / sizeof (csum); + + nums = alloca (len); + memset (nums, 0, len); + strcpy ((char *)nums, path); + + while (cnt) { + csum ^= *nums; + nums++; + cnt--; + } + + return csum; +} + +int +checksum_md5 (const char *path, const struct stat *sb) +{ + uint64_t this_data_checksum = 0; + FILE *filep = NULL; + char *cmd = NULL; + char strvalue[17] = {0,}; + int ret = -1; + int len = 0; + const char *pos = NULL; + char *cpos = NULL; + + /* Have to escape single-quotes in filename. + * First, calculate the size of the buffer I'll need. + */ + for (pos = path; *pos; pos++) { + if ( *pos == '\'' ) + len += 4; + else + len += 1; + } + + cmd = malloc(sizeof(char) * (len + 20)); + cmd[0] = '\0'; + + /* Now, build the command with single quotes escaped. */ + + cpos = cmd; + strcpy(cpos, "md5sum '"); + cpos += 8; + + /* Add the file path, with every single quotes replaced with this sequence: + * '\'' + */ + + for (pos = path; *pos; pos++) { + if ( *pos == '\'' ) { + strcpy(cpos, "'\\''"); + cpos += 4; + } else { + *cpos = *pos; + cpos++; + } + } + + /* Add on the trailing single-quote and null-terminate. */ + strcpy(cpos, "'"); + + filep = popen (cmd, "r"); + if (!filep) { + perror (path); + goto out; + } + + if (fread (strvalue, sizeof (char), 16, filep) != 16) { + fprintf (stderr, "%s: short read\n", path); + goto out; + } + + this_data_checksum = strtoull (strvalue, NULL, 16); + if (-1 == this_data_checksum) { + fprintf (stderr, "%s: %s\n", strvalue, strerror (errno)); + goto out; + } + checksum_file1 ^= this_data_checksum; + + if (fread (strvalue, sizeof (char), 16, filep) != 16) { + fprintf (stderr, "%s: short read\n", path); + goto out; + } + + this_data_checksum = strtoull (strvalue, NULL, 16); + if (-1 == this_data_checksum) { + fprintf (stderr, "%s: %s\n", strvalue, strerror (errno)); + goto out; + } + checksum_file2 ^= this_data_checksum; + + ret = 0; +out: + if (filep) + pclose (filep); + + if (cmd) + free(cmd); + + return ret; +} + +int +checksum_filenames (const char *path, const struct stat *sb) +{ + DIR *dirp = NULL; + struct dirent *entry = NULL; + unsigned long long csum = 0; + int i = 0; + int found = 0; + + dirp = opendir (path); + if (!dirp) { + perror (path); + goto out; + } + + errno = 0; + while ((entry = readdir (dirp))) { + /* do not calculate the checksum of the entries which user has + told to ignore and proceed to other siblings.*/ + if (arequal_config.ignored_directory) { + for (i = 0;i < arequal_config.directories_ignored;i++) { + if ((strcmp (entry->d_name, + arequal_config.ignored_directory[i]) + == 0)) { + found = 1; + DBG ("ignoring the entry %s\n", + entry->d_name); + break; + } + } + if (found == 1) { + found = 0; + continue; + } + } + csum = checksum_path (entry->d_name); + checksum_dir ^= csum; + } + + if (errno) { + perror (path); + goto out; + } + +out: + if (dirp) + closedir (dirp); + + return 0; +} + + +int +process_file (const char *path, const struct stat *sb) +{ + int ret = 0; + + count_file++; + + avg_uid_file ^= sb->st_uid; + avg_gid_file ^= sb->st_gid; + avg_mode_file ^= sb->st_mode; + + ret = checksum_md5 (path, 
sb); + + return ret; +} + + +int +process_dir (const char *path, const struct stat *sb) +{ + unsigned long long csum = 0; + + count_dir++; + + avg_uid_dir ^= sb->st_uid; + avg_gid_dir ^= sb->st_gid; + avg_mode_dir ^= sb->st_mode; + + csum = checksum_filenames (path, sb); + + checksum_dir ^= csum; + + return 0; +} + + +int +process_symlink (const char *path, const struct stat *sb) +{ + int ret = 0; + char buf[4096] = {0, }; + unsigned long long csum = 0; + + count_symlink++; + + avg_uid_symlink ^= sb->st_uid; + avg_gid_symlink ^= sb->st_gid; + avg_mode_symlink ^= sb->st_mode; + + ret = readlink (path, buf, 4096); + if (ret < 0) { + perror (path); + goto out; + } + + DBG ("readlink (%s) => %s\n", path, buf); + + csum = checksum_path (buf); + + DBG ("checksum_path (%s) => %llx\n", buf, csum); + + checksum_symlink ^= csum; + + ret = 0; +out: + return ret; +} + + +int +process_other (const char *path, const struct stat *sb) +{ + count_other++; + + avg_uid_other ^= sb->st_uid; + avg_gid_other ^= sb->st_gid; + avg_mode_other ^= sb->st_mode; + + checksum_other ^= sb->st_rdev; + + return 0; +} + + +int +process_entry (const char *path, const struct stat *sb, + int typeflag, struct FTW *ftwbuf) +{ + int ret = 0; + char *name = NULL; + char *bname = NULL; + char *dname = NULL; + int i = 0; + + /* The if condition below helps in ignoring some directories in + the given path. If the name of the entry is one of the directory + names that the user told to ignore, then that directory will not + be processed and will return FTW_SKIP_SUBTREE to nftw which will + not crawl this directory and move on to other siblings. + Note that for nftw to recognize FTW_SKIP_TREE, FTW_ACTIONRETVAL + should be passed as an argument to nftw. + + This mainly helps in calculating the checksum of network filesystems + (client-server), where the server might have some hidden directories + for managing the filesystem. So to calculate the sanity of filesytem + one has to get the checksum of the client and then the export directory + of server by telling arequal to ignore some of the directories which + are not part of the namespace. 
+ */ + + if (arequal_config.ignored_directory) { + name = strdup (path); + + name[strlen(name)] == '\0'; + + bname = strrchr (name, '/'); + if (bname) + bname++; + + dname = dirname (name); + for ( i = 0; i < arequal_config.directories_ignored; i++) { + if ((strcmp (bname, arequal_config.ignored_directory[i]) + == 0) && (strcmp (arequal_config.test_directory, + dname) == 0)) { + DBG ("ignoring %s\n", bname); + ret = FTW_SKIP_SUBTREE; + if (name) + free (name); + return ret; + } + } + } + + DBG ("processing entry %s\n", path); + + switch ((S_IFMT & sb->st_mode)) { + case S_IFDIR: + ret = process_dir (path, sb); + break; + case S_IFREG: + ret = process_file (path, sb); + break; + case S_IFLNK: + ret = process_symlink (path, sb); + break; + default: + ret = process_other (path, sb); + break; + } + + if (name) + free (name); + return ret; +} + + +int +display_counts (FILE *fp) +{ + fprintf (fp, "\n"); + fprintf (fp, "Entry counts\n"); + fprintf (fp, "Regular files : %lld\n", count_file); + fprintf (fp, "Directories : %lld\n", count_dir); + fprintf (fp, "Symbolic links : %lld\n", count_symlink); + fprintf (fp, "Other : %lld\n", count_other); + fprintf (fp, "Total : %lld\n", + (count_file + count_dir + count_symlink + count_other)); + + return 0; +} + + +int +display_checksums (FILE *fp) +{ + fprintf (fp, "\n"); + fprintf (fp, "Checksums\n"); + fprintf (fp, "Regular files : %llx%llx\n", checksum_file1, checksum_file2); + fprintf (fp, "Directories : %llx\n", checksum_dir); + fprintf (fp, "Symbolic links : %llx\n", checksum_symlink); + fprintf (fp, "Other : %llx\n", checksum_other); + fprintf (fp, "Total : %llx\n", + (checksum_file1 ^ checksum_file2 ^ checksum_dir ^ checksum_symlink ^ checksum_other)); + + return 0; +} + + +int +display_metadata (FILE *fp) +{ + fprintf (fp, "\n"); + fprintf (fp, "Metadata checksums\n"); + fprintf (fp, "Regular files : %llx\n", + (avg_uid_file + 13) * (avg_gid_file + 11) * (avg_mode_file + 7)); + fprintf (fp, "Directories : %llx\n", + (avg_uid_dir + 13) * (avg_gid_dir + 11) * (avg_mode_dir + 7)); + fprintf (fp, "Symbolic links : %llx\n", + (avg_uid_symlink + 13) * (avg_gid_symlink + 11) * (avg_mode_symlink + 7)); + fprintf (fp, "Other : %llx\n", + (avg_uid_other + 13) * (avg_gid_other + 11) * (avg_mode_other + 7)); + + return 0; +} + +int +display_stats (FILE *fp) +{ + display_counts (fp); + + display_metadata (fp); + + display_checksums (fp); + + return 0; +} + + +int +main(int argc, char *argv[]) +{ + int ret = 0; + int i = 0; + + ret = argp_parse (&argp, argc, argv, 0, 0, NULL); + if (ret != 0) { + fprintf (stderr, "parsing arguments failed\n"); + return -2; + } + + /* Use FTW_ACTIONRETVAL to take decision on what to do depending upon */ + /* the return value of the callback function */ + /* (process_entry in this case) */ + ret = nftw (arequal_config.test_directory, process_entry, 30, + FTW_ACTIONRETVAL|FTW_PHYS|FTW_MOUNT); + if (ret != 0) { + fprintf (stderr, "ftw (%s) returned %d (%s), terminating\n", + argv[1], ret, strerror (errno)); + return 1; + } + + display_stats (stdout); + + if (arequal_config.ignored_directory) { + for (i = 0; i < arequal_config.directories_ignored; i++) { + if (arequal_config.ignored_directory[i]) + free (arequal_config.ignored_directory[i]); + } + free (arequal_config.ignored_directory); + } + + return 0; +} diff --git a/arequal/arequal-run.sh b/arequal/arequal-run.sh new file mode 100755 index 0000000..597cf26 --- /dev/null +++ b/arequal/arequal-run.sh @@ -0,0 +1,45 @@ +#!/bin/bash + + + +function do_copy() +{ + local src="$1"; + 
local dst="$2"; + + rsync -avz $src $dst; +} + + +function calc_checksum() +{ + local dir="$1"; + + echo "Calculating checksum on directory $dir ..." + arequal-checksum "$dir"; + echo "-------------------------------------" + echo +} + + +function main() +{ + local src="$1"; + local dst="$2"; + + if [ $# -ne 2 ]; then + echo "Usage: $0 <src> <dst>"; + echo " e.g: $0 /usr /mnt/glusterfs/usr"; + fi + + do_copy "$src" "$dst"; + + echo "Calculating checksums on source and destination"; + echo "==============================================="; + + calc_checksum "$src"; + + calc_checksum "$dst"; +} + +main "$@" diff --git a/helper_scripts/glusterfs-precreate.sh b/helper_scripts/glusterfs-precreate.sh new file mode 100755 index 0000000..c5b62a2 --- /dev/null +++ b/helper_scripts/glusterfs-precreate.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -e; + +if [ $# -eq 0 ]; then + echo "Usage: $0 <dir1> [dir2 [dir3 [dir4 ...]]]"; + exit 1 +fi + +for dir in "$@"; do + if [ ! -d "$dir" ]; then + echo "$0: $dir not a directory" + exit 1; + fi +done + +subdirs="{00" +for i in {1..255}; do + n=$(printf "%02x" $i); + subdirs="$subdirs,$n"; +done +subdirs="$subdirs}" + +mkdir -vp "$dir/.glusterfs"; + +for dir in $@; do + for i in {0..255}; do + n=$(printf "%02x" $i); + mkdir -vp "$dir/.glusterfs/$n" + eval "mkdir -vp $dir/.glusterfs/$n/$subdirs" + done +done
\ No newline at end of file diff --git a/perf-framework/.runfile b/perf-framework/.runfile new file mode 100644 index 0000000..c227083 --- /dev/null +++ b/perf-framework/.runfile @@ -0,0 +1 @@ +0
\ No newline at end of file diff --git a/perf-framework/README b/perf-framework/README new file mode 100644 index 0000000..7733c35 --- /dev/null +++ b/perf-framework/README @@ -0,0 +1,85 @@ +Brief desciption of individual scripts: +---------------------------------------- +acl - Enable/Disable acl on the backend filesystem. +analyze_perf_data - Create plots using the data generated during perf runs. +batchrun - Used for running multiple tarballs one after the other. +buildit - Build scripts used by deploy_gluster for parallel builds. +calc_all - Print results of all 3 runs performed in perf.sh +calc_avg - Print average of 3 runs performed in perf.sh +calc_best - Print best of the 3 runs performed in perf.sh +calc_worst - Print worst of the 3 runs performed in perf.sh +check_install - Print checksums of built and installed binaries +check_install.new - Check if installation succeeded on all machines. +create_gluster_vol - Create a gluster volume using parameters in gf_perf_conig +deploy_gluster - Install gluster from source on multiple machines +gf_perf_config - Example config file. Most scripts depend on this config file. +perf.sh - Perf test wrapper that runs perf-test.sh thrice. +perf-test.sh - Perf test +quota_gsync_run - Used to run different Quota/Gsync combinations +run - Used to run any command on multiple systems +setbatch - Set the tarballs for batch run +setrun - Set the tarball for a single run +start_perf_measure - Launch statistics collection script on all machines +involved in a perf run, start the perf run and save statistics post run. +stat_collect - Helper script to collect system statistics during perf runs. + +diff_perfrun +------------ +<File Begin> +DATA FOR BASELINE - Output of calc_avg for the baseline +followed by +DATA FOR THE NEW RUN - Output of calc_avg for the new run +<FILE END> + +It also assumes that there are 4 columns - one each for Quota/Gsync combination. +You can then put in a criteria for percentage deviation from the baseline which should send out an alert. + + +Setting up the environment: +------------------------------------ +* Initialize the required variables by modifying the example gf_perf_config +present in the git repo. + +Building and deploying GlusterFS +------------------------------------ +* Populate the BRICKS variable in gf_perf_config +* If you want to deploy a tarball, update the tarball in gf_perf_config +and then run "deploy_gluster". This will copy the tarball to all the bricks and +will build it there. This uses a script called buildit which is part of the +repo. + +Running the tests +------------------------------------ +* Use start_perf_measure to run the tests. The test run details can be changed +in gf_perf_config before the run. You can specify whether the configuration is +a distribute, replicate or distributed-replicate (distrep). +* This will generate statistics in the log_repo for the current run under the +director - "$log_repo/<current run>" +* The run "id" is maintained by a ".runfile" which gets generated on the first +run and is later updated as required during subsequent runs. +* NOTE: You can use "batchrun" to start tests for all combinations. Downloading +the tarball to the cwd is the responsibility of the user. + +Analyzing the output +------------------------------------ +* Use analyze_perf_measure to generate plots of resource utilization for all the +bricks and the client. 
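A minimal usage sketch for this plotting step (run names such as run11/run12 are illustrative; the analyze_perf_data script added later in this diff sources gf_perf_config, so it is run from the framework directory with a run directory as the first argument and, optionally, a second run directory to compare against):

    ./analyze_perf_data run12            # generate plots for a single run
    ./analyze_perf_data run12 run11      # also plot run12 against baseline run11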
+ +Running Quota and Gsync combinations: +------------------------------------ +* After an initial run (as described above) is complete, you can run different +combinations of Quota/Gsync by running quota_gsync_run. +* NOTE: Before running this script, it is important to comment out the line - +./create_gluster_vol in the script start_perf_measure. +Otherwise, a new volume will be created for every combination, which will wipe +out the Quota/Gsync settings. Look at the usage in batchrun. +TODO: Need to use the gluster command line to enable/disable quota/gsync instead +of directly modifying the volume file. + +Setting up automated-email setup: +-------------------------------- +* Setup automated emails to send results as soon as the runs are done. The whole +email setup is done using Lakshmipathi's inotify program available at +http://github.com/gluster/qa/blob/master/c_pgms/inotify.c & when a file is scpd +to the particular destination, inotify sends across the mail from a shell.gluster.com +account.
\ No newline at end of file diff --git a/perf-framework/acl b/perf-framework/acl new file mode 100755 index 0000000..c971fee --- /dev/null +++ b/perf-framework/acl @@ -0,0 +1,42 @@ +#!/bin/bash + +source gf_perf_config + +function usage() +{ + echo "Usage: $0 <on|off>" + exit 1 +} + +function get_mount() +{ + brick=$1 + export_dir=$2 + ssh -l root $brick ls -d $export_dir > /dev/null + if [ $? -ne 0 ] + then + echo "FATAL : Export directory does not exist. Exiting..." + exit 1 + fi + echo `ssh -l root $brick df -h $export_dir | grep -v ^Filesystem | awk '{print $NF}'` +} + + +if [ $# -ne 1 ] +then + usage +fi + +for brick in $BRICK_IP_ADDRS +do + mount_point=`get_mount $brick $SERVER_EXPORT_DIR` + if [ "$1" == "on" ] + then + ssh -l root $brick mount -o remount,acl $mount_point + elif [ "$1" == "off" ] + then + ssh -l root $brick mount -o remount,noacl $mount_point + else + usage + fi +done diff --git a/perf-framework/analyze_perf_data b/perf-framework/analyze_perf_data new file mode 100755 index 0000000..129a1e9 --- /dev/null +++ b/perf-framework/analyze_perf_data @@ -0,0 +1,466 @@ +#!/bin/bash -u + +source gf_perf_config + +if [ $# -le 0 -o $# -gt 2 ] +then + echo "Usage : $0 <run directory>" + exit 1 +fi + +rundir=$1 +cmpdir="" + +if [ $# -eq 2 ] +then + cmpdir=$2 +fi + +function gen_cpu_data() +{ + time=0 + sum=0 + + if [ -f times ] + then + rm times + fi + + for op in `cat ops` + do + time=0; + for i in `grep -w ^$op $PERFLOG | awk '{print $2}'| cut -f1 -d'.'` + do + time=$((time+$i)) + done; + time=$((time/3)) # Average over three runs + sum=$((sum + $time)) + echo $sum >> times + done + sed -i 's/$/ 100/g' times + + num_procs=`grep -w ^processor $SYSINFO | wc -l` + echo `grep idle $MPSTAT_LOG | head -1 | awk '{print $NF}'` | grep -o idle + idle_col_tweak=$? + echo `grep CPU $MPSTAT_LOG | head -1 | awk '{print $3}'` | grep -o CPU > /dev/null + cpu_col_tweak=$? 
+ for i in "all" 0 `seq $((num_procs-1))` + do + egrep -v 'Linux|^$|idle' $MPSTAT_LOG | awk -v v1=$cpu_col_tweak -v v2=$idle_col_tweak '{print $(3-v1) " " $(NF-v2)}' | grep -w ^$i | cut -f 2 -d' '| sed 's/^/100-/g' | bc -l > cpu-$i; + cat -n cpu-$i > cpu-$i.new + done +} + +function plot_cpu_usage() +{ + xrange=$((`tail -1 times | awk '{print $1}'`+50)) + mpstat_interval=5 + plot_info=pinfo.$$ + num_procs=`grep -w ^processor $SYSINFO | wc -l` + ltype=1 + identity=$1 + + for i in "all" 0 `seq $((num_procs-1))` + do + echo -ne "\"cpu-$i.new\" using (\$1*$mpstat_interval):2 title 'cpu-$i' with lines lt $ltype lw 2,\\c" >> $plot_info + ltype=$((ltype+1)) + done + echo "\"times\" using 1:2 title '' with impulse lt 2 lw 1" >> $plot_info + + gnuplot <<EOF + set autoscale + set grid + set title "CPU utilization : All CPUs ($identity)" + set xlabel "Time" + set ylabel "% CPU utilization" + set xr [0:$xrange] + set yr [0:100] + set terminal png nocrop size 1024,768 + set output "$CPU_PLOT_OUTPUT" + plot `cat $plot_info` +EOF + rm $plot_info +} + +function gen_vm_data() +{ + egrep -v 'memory|free|^$' $VMSTAT_LOG | awk '{print $4}' > vm_datafile + totalmem=`grep -w ^MemTotal $SYSINFO | awk '{print $2}'` + cat vm_datafile | sed "s/^/$totalmem-/g" | bc > memfile + cat -n memfile > memfile.new +} + +function plot_vm_usage() +{ + vmstat_interval=5 + total_mem=`grep -w ^MemTotal $SYSINFO | awk '{print $2}'` + xrange=$((`tail -1 times | awk '{print $1}'`+50)) + identity=$1 + + gnuplot <<EOF + set autoscale + set grid + set title "Memory utilization ($identity)" + set xlabel "Time" + set ylabel "Memory utilization in bytes" + set xr [0:$xrange] + set yr [0:$total_mem] + set terminal png nocrop size 1024,768 + set output "$VM_PLOT_OUTPUT" + plot "memfile.new" using (\$1*$vmstat_interval):2 title 'memory-usage' with lines lt 2 lw 2,\ + "times" using 1:(\$2*$total_mem/100) title '' with impulse lt 2 lw 1 +EOF +} + +function gen_iostats() +{ + brick=$1 + dev=`ssh $brick "df -h $SERVER_EXPORT_DIR" | tail -1 | awk '{print $1}' | cut -f3 -d'/'` + egrep -v 'Device|^$' $IOSTAT_LOG |grep -w ^$dev | awk '{print $10}' | cut -f1 -d'.' > io_await + cat -n io_await > io_await.new + egrep -v 'Device|^$' $IOSTAT_LOG |grep -w ^$dev | awk '{print $6}' | cut -f1 -d'.' > read_tput + cat -n read_tput > read_tput.new + egrep -v 'Device|^$' $IOSTAT_LOG |grep -w ^$dev | awk '{print $7}' | cut -f1 -d'.' 
> write_tput + cat -n write_tput > write_tput.new +} + +function plot_iostats() +{ + iostat_interval=5 + max_wait=$((`sort -n io_await | tail -1` + 50)) + max_read=$(((`sort -n read_tput | tail -1`) / 2 + 100)) + max_write=$(((`sort -n write_tput | tail -1`) / 2 + 100)) + max_io=$max_write + if [ $max_read -gt $max_write ] + then + max_io=$max_read; + fi + xrange=$((`tail -1 times | awk '{print $1}'`+50)) + identity=$1 + + gnuplot <<EOF1 + set autoscale + set grid + set title "IO Wait times ($identity)" + set xlabel "Time in seconds" + set ylabel "IO Wait times in milliseconds" + set xr [0:$xrange] + set yr [0:$max_wait] + set terminal png nocrop size 1024,768 + set output "$IO_TIMES_PLOT_OUTPUT" + plot "io_await.new" using (\$1*$iostat_interval):2 title 'IO wait times' with lines lt 3 lw 2,\ + "times" using 1:(\$2*$max_wait/100) title '' with impulse lt 2 lw 1 +EOF1 + gnuplot <<EOF2 + set autoscale + set grid + set title "Disk Read-Write throughput ($identity)" + set xlabel "Time in seconds" + set ylabel "Throughput in KB/sec" + set xr [0:$xrange] + set yr [0:$max_io] + set terminal png nocrop size 1024,768 + set output "$IO_TPUT_PLOT_OUTPUT" + plot "read_tput.new" using (\$1*$iostat_interval):(\$2/2) title 'Read throughput' with lines lt 4 lw 2,\ + "write_tput.new" using (\$1*$iostat_interval):(\$2/2) title 'Write throughput' with lines lt 5 lw 2,\ + "times" using 1:(\$2*$max_io/100) title '' with impulse lt 2 lw 1 +EOF2 +} + +function gen_cmp_data() +{ + perflog_baseline=$1 + perflog_current=$2 + + time=0 + for op in `cat ops` + do + time=0; + for i in `grep -w ^$op $perflog_baseline | awk '{print $2}'| cut -f1 -d'.'` + do + time=$((time+$i)) + done; + time=$((time/3)) # Average over three runs + echo $time >> btimes.$$ + done + + for op in `cat ops` + do + time=0; + for i in `grep -w ^$op $perflog_current | awk '{print $2}'| cut -f1 -d'.'` + do + time=$((time+$i)) + done; + time=$((time/3)) # Average over three runs + echo $time >> ctimes.$$ + done + + paste -d " " ops btimes.$$ ctimes.$$ > $CMP_DATAFILE + rm btimes.$$ ctimes.$$ +} + +function plot_comparison() +{ + a=`cat $CMP_DATAFILE | awk '{print $2"\n"$3}' | sort -n | tail -1` + yrange=`echo $a + $a/5 | bc` + b=`wc -l $CMP_DATAFILE | awk '{print $1}'` + xrange=`echo $b - 0.5 | bc` + + gnuplot <<EOF + reset + set key at graph 0.15, 0.85 horizontal samplen 0.1 + set style data histogram + set style histogram cluster gap 1 + set style fill solid border -1 + set boxwidth 0.8 + set xtic rotate by 90 scale 0 + unset ytics + set y2tics rotate by 90 + set terminal png nocrop size 1024,768 + set xlabel ' ' + set size 0.6, 1 + set yrange [0:$yrange]; set xrange [-0.5:$xrange] + set y2label '$XLABEL' offset -2 + set label 1 '$YLABEL' at graph 0.5, -0.4 centre rotate by 180 + set label 2 '$LEGEND_A' at graph 0.05, 0.85 left rotate by 90 + set label 3 '$LEGEND_B' at graph 0.12, 0.85 left rotate by 90 + set label 4 '$PLOT_TITLE' at graph -0.01, 0.5 center rotate by 90 + set output "tmp.$$.png" + p '$CMP_DATAFILE' u 2 title ' ', '' u 3 title ' ', '' u 0:(0):xticlabel(1) w l title '' +EOF + convert tmp.$$.png -rotate 90 $CMP_PLOT_OUTPUT + rm tmp.$$.png +} + +function gen_intr_csw_stats() +{ + egrep -v 'memory|free|^$' $VMSTAT_LOG | awk '{print $11}' > intrstat + cat -n intrstat > intrstat.new + egrep -v 'memory|free|^$' $VMSTAT_LOG | awk '{print $12}' > cswstat + cat -n cswstat > cswstat.new +} + +function plot_intr_csw_stats() +{ + vmstat_interval=5 + xrange=$((`tail -1 times | awk '{print $1}'` + 50)) + max_intr=$((`sort -n intrstat | tail -1` 
+ 100)) + max_csw=$((`sort -n cswstat | tail -1` + 100)) + max_val=$max_csw + if [ $max_intr -gt $max_csw ] + then + max_val=$max_intr; + fi + identity=$1 + + gnuplot <<EOF + set autoscale + set grid + set title "Interrupts and context switches ($identity)" + set xlabel "Time in seconds" + set ylabel "Interrupts/Context Switches" + set xr [0:$xrange] + set yr [0:$max_val] + set terminal png nocrop size 1024,768 + set output "$INTR_CSW_PLOT_OUTPUT" + plot "intrstat.new" using (\$1*$vmstat_interval):2 title 'Interrupts' with lines lt 4 lw 2,\ + "cswstat.new" using (\$1*$vmstat_interval):2 title 'Context Switches' with lines lt 5 lw 2,\ + "times" using 1:(\$2*$max_val/100) title '' with impulse lt 2 lw 1 +EOF +} + +function gen_netstats() +{ + ip_addr=$1 + dev=`ssh $ip_addr ifconfig | grep -B1 $ip_addr | head -1 | cut -f1 -d' '` + egrep -v 'IFACE|^$' $SAR_NETSTAT_LOG |grep -w $dev | awk '{print $3}' | cut -f1 -d'.' > rpkts + cat -n rpkts > rpkts.new + egrep -v 'IFACE|^$' $SAR_NETSTAT_LOG |grep -w $dev | awk '{print $4}' | cut -f1 -d'.' > wpkts + cat -n wpkts > wpkts.new + egrep -v 'IFACE|^$' $SAR_NETSTAT_LOG |grep -w $dev | awk '{print $5}' | cut -f1 -d'.' > rkbytes + cat -n rkbytes > rkbytes.new + egrep -v 'IFACE|^$' $SAR_NETSTAT_LOG |grep -w $dev | awk '{print $6}' | cut -f1 -d'.' > wkbytes + cat -n wkbytes > wkbytes.new +} + +function plot_netstats() +{ + sar_netstat_interval=5 + max_read_pkts=$((`sort -n rpkts | tail -1` + 50)) + max_write_pkts=$((`sort -n wpkts | tail -1` + 50)) + max_read_kbytes=$(((`sort -n rkbytes | tail -1`)/1024 + 100)) + max_write_kbytes=$(((`sort -n wkbytes | tail -1`)/1024 + 100)) + + max_pkts=$max_write_pkts + if [ $max_read_pkts -gt $max_write_pkts ] + then + max_pkts=$max_read_pkts; + fi + + max_kbytes=$max_write_kbytes + if [ $max_read_kbytes -gt $max_write_kbytes ] + then + max_kbytes=$max_read_kbytes; + fi + xrange=$((`tail -1 times | awk '{print $1}'`+50)) + identity=$1 + + gnuplot <<EOF1 + set autoscale + set grid + set title "Network statistics - Packet Read/Write ($identity)" + set xlabel "Time in seconds" + set ylabel "Number of Packets" + set xr [0:$xrange] + set yr [0:$max_pkts] + set terminal png nocrop size 1024,768 + set output "$NET_PKTS_PLOT_OUTPUT" + plot "rpkts.new" using (\$1*$sar_netstat_interval):2 title 'Read Packets' with lines lt 3 lw 2,\ + "wpkts.new" using (\$1*$sar_netstat_interval):2 title 'Write Packets' with lines lt 4 lw 2,\ + "times" using 1:(\$2*$max_pkts/100) title '' with impulse lt 2 lw 1 +EOF1 + gnuplot <<EOF2 + set autoscale + set grid + set title "Network Read-Write throughput ($identity)" + set xlabel "Time in seconds" + set ylabel "Throughput in KB/sec" + set xr [0:$xrange] + set yr [0:$max_kbytes] + set terminal png nocrop size 1024,768 + set output "$NET_TPUT_PLOT_OUTPUT" + plot "rkbytes.new" using (\$1*$sar_netstat_interval):(\$2/1024) title 'Read throughput' with lines lt 3 lw 2,\ + "wkbytes.new" using (\$1*$sar_netstat_interval):(\$2/1024) title 'Write throughput' with lines lt 4 lw 2,\ + "times" using 1:(\$2*$max_kbytes/100) title '' with impulse lt 2 lw 1 +EOF2 +} + +function analyse_plot_data() +{ + identity=$1 + + if [ $identity != "client" ] + then + brick=$2 + fi + # Generate CPU data + gen_cpu_data + # plot CPU data + plot_cpu_usage $identity + + # Generate VM data + gen_vm_data + # plot VM data + plot_vm_usage $identity + + if [ $identity != "client" ] + then + # Generate io-times and io-throughput data + # This makes sense only for the bricks since the client is not involved in disk IO + gen_iostats $brick 
+ # plot io-times and io-throughput data + plot_iostats $identity + fi + + # Generate interrupt and context switch data + gen_intr_csw_stats + # plot interrupt and context switch data + plot_intr_csw_stats $identity + + # Generate network packet statistics and throughput data + if [ $identity != "client" ] + then + gen_netstats $brick + else + gen_netstats $CLIENT_IP_ADDR + fi + # Generate network packet statistics and throughput data + plot_netstats $identity + + # cleanup tmp files + rm vm_datafile memfile* + rm cpu-* times + if [ $identity != "client" ] + then + rm io_await* read_tput* write_tput* + fi + rm cswstat* intrstat* + rm rpkts* wpkts* rkbytes* wkbytes* +} + +function analyse_client_data() +{ + MPSTAT_LOG=$rundir/client/mpstat_log + VMSTAT_LOG=$rundir/client/vmstat_log + IOSTAT_LOG=$rundir/client/iostat_log + SAR_NETSTAT_LOG=$rundir/client/sar_netstat_log + SYSINFO=$rundir/client/sysinfo + PERFLOG=$rundir/client/perf-test.log + CPU_PLOT_OUTPUT=$rundir/client/cpu.png + VM_PLOT_OUTPUT=$rundir/client/vm.png + IO_TIMES_PLOT_OUTPUT=$rundir/client/io_times.png + IO_TPUT_PLOT_OUTPUT=$rundir/client/io_tput.png + INTR_CSW_PLOT_OUTPUT=$rundir/client/intr_csw.png + NET_PKTS_PLOT_OUTPUT=$rundir/client/net-pkts.png + NET_TPUT_PLOT_OUTPUT=$rundir/client/net-tput.png + identity="client" + analyse_plot_data $identity +} + +function analyse_brick_data() +{ + ind=0 + for b in $BRICK_IP_ADDRS + do + ind=$((ind+1)) + MPSTAT_LOG=$rundir/brick$ind*mpstat-log + VMSTAT_LOG=$rundir/brick$ind*vmstat-log + IOSTAT_LOG=$rundir/brick$ind*iostat-log + SAR_NETSTAT_LOG=$rundir/brick$ind-*sar_netstat-log + SYSINFO=$rundir/brick$ind*sysinfo-log + PERFLOG=$rundir/client/perf-test.log + CPU_PLOT_OUTPUT=$rundir/brick$ind-cpu.png + VM_PLOT_OUTPUT=$rundir/brick$ind-vm.png + IO_TIMES_PLOT_OUTPUT=$rundir/brick$ind-io_times.png + IO_TPUT_PLOT_OUTPUT=$rundir/brick$ind-io_tput.png + INTR_CSW_PLOT_OUTPUT=$rundir/brick$ind-intr_csw.png + NET_PKTS_PLOT_OUTPUT=$rundir/brick$ind-net-pkts.png + NET_TPUT_PLOT_OUTPUT=$rundir/brick$ind-net-tput.png + + identity="brick$ind" + analyse_plot_data $identity $b + done +} + +function do_comparison() +{ + # Generate comparison data + gen_cmp_data $1 $2 + + # plot perf comparison + plot_comparison +} + +cp ops $LOCAL_LOG_REPO +cd $LOCAL_LOG_REPO +analyse_client_data +analyse_brick_data +rm ops + +if [ "$cmpdir" != "" ] +then + CMP_PLOT_OUTPUT=$cmpdir-$rundir-cmp.png + CMP_DATAFILE=$rundir/client/cmp-with-$cmpdir-data.dat + + XLABEL="Time in seconds" + YLABEL="Operations" + LEGEND_A="$cmpdir" + LEGEND_B="$rundir" + PLOT_TITLE="Performance comparison - $cmpdir vs $rundir" + do_comparison $cmpdir/client/perf-test.log $rundir/client/perf-test.log +fi + +# Create tarball of the plots +echo "Creating plots.tar..." +cd $rundir +tar cf plots.tar *.png client/*.png diff --git a/perf-framework/automate_pwl_ssh b/perf-framework/automate_pwl_ssh new file mode 100755 index 0000000..78e4c47 --- /dev/null +++ b/perf-framework/automate_pwl_ssh @@ -0,0 +1,43 @@ +#!/bin/bash -u + +expect_log=./expectlog + +if [ ! -f /root/.ssh/id_rsa.pub ] +then + ssh-keygen -q -t rsa -N "" -f /root/.ssh/id_rsa +fi + +if [ ! -f /usr/bin/expect ] +then + echo "expect not found. Attempting to install..." + if grep -i centos /etc/issue > /dev/null 2>&1 + then + echo "CentOS. Using yum" + yum install -y `yum whatprovides expect | grep -o "expect.*x86_64"` > /dev/null 2>&1 + if [ $? -ne 0 ] + then + echo "Failed installing expect. Exiting..." + exit 1 + fi + elif grep -i ubuntu /etc/issue > /dev/null 2>&1 + then + echo "Ubuntu. 
Using apt-get" + apt-get install -y expect > /dev/null 2>&1 + if [ $? -ne 0 ] + then + echo "Failed installing expect. Exiting..." + exit 1 + fi + else + echo "Unknown distribution" + echo "Install expect and try again." + exit 1 + fi +fi + +echo "Attempting passwordless ssh setup on multiple hosts." +echo "log can be found in $expect_log" + +export ROOT_PW1 +export ROOT_PW2 +./pwl_ssh $expect_log $SSH_HOSTS diff --git a/perf-framework/batchrun b/perf-framework/batchrun new file mode 100755 index 0000000..d45455e --- /dev/null +++ b/perf-framework/batchrun @@ -0,0 +1,50 @@ +#!/bin/bash + +CONFIG_FILE=gf_perf_config +source $CONFIG_FILE +RUNLIST="glusterfs-3git.tar.gz" + +function do_run() +{ + FIRST=`cat .runfile` + ./start_perf_measure +# sed -i 's/^\.\/create_gluster_vol/#&/' start_perf_measure +# sleep 10 +# if [ $MINOR -gt 1 ]; then +# LAST=$(($FIRST+3)) +# ./quota_gsync_run $run +# sed -i 's/^#\.\/create_gluster_vol/\.\/create_gluster_vol/' start_perf_measure +# fi +} + +function extract_release() +{ + MAJOR=`echo $runlabel | cut -f1 -d'.' | grep -o "^[0-9]"` + MINOR=`echo $runlabel | cut -f2 -d'.' | grep -o "^[0-9]"` +} + +for run in $RUNLIST +do + cp tarballs/$run . + ./setrun $run + ./deploy_gluster + ./check_install.new + if [ $? -ne 0 ] + then + echo "Installation of run $run failed. Continuing with next run" + continue + fi + echo "Sleeping for 10 seconds.." + runlabel=`echo $run|sed -e 's/^glusterfs-//' -e 's/\.tar\.gz//'` + echo "run`cat .runfile` - $MOUNT_TYPE - $runlabel - $GF_CONFIG - (quota off, gsync off)" >> $RUNLOG + sleep 10 + extract_release + do_run + rm $run + sed -i 's/ENABLE_ACL=no/ENABLE_ACL=yes/' gf_perf_config + if [ $MINOR -gt 1 ]; then + ./send_mail `seq $FIRST $LAST` + else + ./send_mail $FIRST + fi +done diff --git a/perf-framework/buildit b/perf-framework/buildit new file mode 100755 index 0000000..df3f176 --- /dev/null +++ b/perf-framework/buildit @@ -0,0 +1,6 @@ +#!/bin/bash + +./autogen.sh > autogen.out.remote 2> autogen.err.remote +./configure CFLAGS="-g -O0 -DDEBUG" > configure.out.remote 2> configure.err.remote +make > make.out.remote 2> make.err.remote +make install > make_install.out.remote 2> make_install.err.remote diff --git a/perf-framework/calc_all b/perf-framework/calc_all new file mode 100755 index 0000000..01e4c91 --- /dev/null +++ b/perf-framework/calc_all @@ -0,0 +1,49 @@ +#!/bin/bash -ue + +egrep_patt="" +num="" +for run in "$@" +do + egrep_patt+="^`echo run$run\|`" + PERFLOG="log_repo/run$run/client/perf-test.log" + if [ $run -lt 9 ] + then + run="0$run" + fi + echo "RUN$run " > /tmp/run$run.$$ + echo "----------------" >> /tmp/run$run.$$ + for op in `cat ops` + do + printf "%-6s" `grep -w ^$op $PERFLOG | awk '{print $2}'| cut -f1 -d'.' 
| tr '\n' '\t'|sed 's/ $//'` >> /tmp/run$run.$$ + printf "\n" >> /tmp/run$run.$$ + done +done +egrep_patt+="^zzz" + +echo "" +egrep -w "$egrep_patt" log_repo/runlog +echo "" +echo "Operations " > /tmp/tmp_ops.$$ +echo "-------------------------" >> /tmp/tmp_ops.$$ +awk '{ printf("%-25s\n", $0) }' ops >> /tmp/tmp_ops.$$ + +if [ $# -lt 2 ]; then + paste /tmp/tmp_ops.$$ /tmp/run*.$$; +else + for list in "$@"; do + if [ $list -lt 9 ]; then + list="0$list" + fi + if [ "$1" -eq $list ]; then + num="{$list" + else + num="$num,$list"; + fi + + if [ "${@: -1}" -eq $list ]; then + num="$num}" + fi + done + eval "paste /tmp/tmp_ops.$$ /tmp/run$num.$$"; +fi +rm /tmp/run*.$$ /tmp/tmp_ops.$$ diff --git a/perf-framework/calc_avg b/perf-framework/calc_avg new file mode 100755 index 0000000..596554f --- /dev/null +++ b/perf-framework/calc_avg @@ -0,0 +1,54 @@ +#!/bin/bash -ue + +egrep_patt="" +num="" +for run in "$@" +do + egrep_patt+="^`echo run$run\|`" + PERFLOG="log_repo/run$run/client/perf-test.log" + if [ $run -lt 9 ] + then + run="0$run" + fi + echo "RUN$run" > /tmp/run$run.$$ + echo "-------" >> /tmp/run$run.$$ + for op in `cat ops` + do + time=0; + for i in `grep -w ^$op $PERFLOG | awk '{print $2}'| cut -f1 -d'.'` + do + time=$((time+$i)) + done; + time=$((time/3)) # Average over three runs + echo $time >> /tmp/run$run.$$ + done +done +egrep_patt+="^zzz" + +echo "" +egrep -w "$egrep_patt" log_repo/runlog +echo "" +echo "Operations " > /tmp/tmp_ops.$$ +echo "-------------------------" >> /tmp/tmp_ops.$$ +awk '{ printf("%-25s\n", $0) }' ops >> /tmp/tmp_ops.$$ + +if [ $# -lt 2 ]; then + paste /tmp/tmp_ops.$$ /tmp/run*.$$; +else + for list in "$@"; do + if [ $list -lt 9 ]; then + list="0$list" + fi + if [ "$1" -eq $list ]; then + num="{$list" + else + num="$num,$list"; + fi + + if [ "${@: -1}" -eq $list ]; then + num="$num}" + fi + done + eval "paste /tmp/tmp_ops.$$ /tmp/run$num.$$"; +fi +rm /tmp/run*.$$ /tmp/tmp_ops.$$ diff --git a/perf-framework/calc_best b/perf-framework/calc_best new file mode 100755 index 0000000..fed9fda --- /dev/null +++ b/perf-framework/calc_best @@ -0,0 +1,31 @@ +#!/bin/bash -ue + +egrep_patt="" + +for run in "$@" +do + egrep_patt+="^`echo run$run\|`" + PERFLOG="log_repo/run$run/client/perf-test.log" + if [ $run -lt 9 ] + then + run="0$run" + fi + echo "RUN$run " > /tmp/run$run.$$ + echo "------------" >> /tmp/run$run.$$ + for op in `cat ops` + do + printf "%-6s" `grep -w ^$op $PERFLOG | awk '{print $2}'| cut -f1 -d'.' | cat -n | sort -n -k2 | head -1` >> /tmp/run$run.$$ + printf "\n" >> /tmp/run$run.$$ + done +done +egrep_patt+="^zzz" + +echo "" +egrep -w "$egrep_patt" log_repo/runlog +echo "" +echo "Operations " > /tmp/tmp_ops.$$ +echo "-------------------------" >> /tmp/tmp_ops.$$ +awk '{ printf("%-25s\n", $0) }' ops >> /tmp/tmp_ops.$$ + +paste /tmp/tmp_ops.$$ /tmp/run*.$$ +rm /tmp/run*.$$ /tmp/tmp_ops.$$ diff --git a/perf-framework/calc_worst b/perf-framework/calc_worst new file mode 100755 index 0000000..363ac58 --- /dev/null +++ b/perf-framework/calc_worst @@ -0,0 +1,31 @@ +#!/bin/bash -ue + +egrep_patt="" + +for run in "$@" +do + egrep_patt+="^`echo run$run\|`" + PERFLOG="log_repo/run$run/client/perf-test.log" + if [ $run -lt 9 ] + then + run="0$run" + fi + echo "RUN$run " > /tmp/run$run.$$ + echo "------------" >> /tmp/run$run.$$ + for op in `cat ops` + do + printf "%-6s" `grep -w ^$op $PERFLOG | awk '{print $2}'| cut -f1 -d'.' 
| cat -n | sort -n -k2 | tail -1` >> /tmp/run$run.$$ + printf "\n" >> /tmp/run$run.$$ + done +done +egrep_patt+="^zzz" + +echo "" +egrep -w "$egrep_patt" log_repo/runlog +echo "" +echo "Operations " > /tmp/tmp_ops.$$ +echo "-------------------------" >> /tmp/tmp_ops.$$ +awk '{ printf("%-25s\n", $0) }' ops >> /tmp/tmp_ops.$$ + +paste /tmp/tmp_ops.$$ /tmp/run*.$$ +rm /tmp/run*.$$ /tmp/tmp_ops.$$ diff --git a/perf-framework/check_install b/perf-framework/check_install new file mode 100755 index 0000000..aa72447 --- /dev/null +++ b/perf-framework/check_install @@ -0,0 +1,13 @@ +#!/bin/bash -ue + +source gf_perf_config + +gluster_build_dir=`tar tvf $RELEASE_TARBALL | head -1 | awk '{print $NF}'` +def_gfd_loc=/usr/local/sbin/glusterfsd + +echo "Checking server installation" +./run cksum $SERVER_BUILD_DIR/$gluster_build_dir/glusterfsd/src/.libs/glusterfsd $def_gfd_loc + +echo "" +echo "Checking client installation" +cksum $CLIENT_BUILD_DIR/$gluster_build_dir/glusterfsd/src/.libs/glusterfsd $def_gfd_loc diff --git a/perf-framework/check_install.new b/perf-framework/check_install.new new file mode 100755 index 0000000..d571ae1 --- /dev/null +++ b/perf-framework/check_install.new @@ -0,0 +1,33 @@ +#!/bin/bash -ue + +source gf_perf_config + +gluster_build_dir=`tar tvf $RELEASE_TARBALL | head -1 | awk '{print $NF}'` +def_gfd_loc=/usr/local/sbin/glusterfsd + +echo "Checking server installation" +for brick in $BRICK_IP_ADDRS +do + build_file_cksum=`ssh -l root $brick "cksum $SERVER_BUILD_DIR/$gluster_build_dir/glusterfsd/src/.libs/glusterfsd" | cut -f1 -d' '` + current_installed_file_cksum=`ssh -l root $brick cksum $def_gfd_loc | cut -f1 -d' '` + if [ "$build_file_cksum" != "$current_installed_file_cksum" ] + then + echo "Checksum on $brick do not match. Aborting..." + exit 1 + else + echo "Checksum on $brick OK" + fi +done + +echo "" +echo "Checking client installation" +build_file_cksum=`cksum $CLIENT_BUILD_DIR/$gluster_build_dir/glusterfsd/src/.libs/glusterfsd | cut -f1 -d' '` +current_installed_file_cksum=`cksum $def_gfd_loc | cut -f1 -d' '` + +if [ "$build_file_cksum" != "$current_installed_file_cksum" ] +then + echo "Checksum on client do not match. Aborting..." + exit 1 +else + echo "Checksum on client OK" +fi diff --git a/perf-framework/create_gluster_vol b/perf-framework/create_gluster_vol new file mode 100755 index 0000000..ce4a29e --- /dev/null +++ b/perf-framework/create_gluster_vol @@ -0,0 +1,127 @@ +#!/bin/bash -ue + +CONFIG_FILE=gf_perf_config +source $CONFIG_FILE +GF_BIN=/usr/local/sbin + +function mgmt_vol_create() +{ + if [ $ENABLE_ACL == "yes" ] + then + ./acl on + else + ./acl off + fi + for brick in $BRICK_IP_ADDRS + do + ssh -l root $MGMT_NODE "$GF_BIN/gluster peer probe $brick" + if [ $? -ne 0 ]; then + echo "cluster not set up properly. Please check & restart." + exit 255; + fi + done + ssh -l root $MGMT_NODE "$@" + ssh -l root $MGMT_NODE "$GF_BIN/gluster volume start $VOLNAME" + if [ $GF_CONFIG == "stripe" ]; then + echo "Setting cache-size to 128MB for stripe tests..." + ssh -l root $MGMT_NODE "$GF_BIN/gluster volume set $VOLNAME cache-size 128MB" + fi + #mem-factor is set for mount type of nfs to increase performance. + if [ $MOUNT_TYPE == "nfs" ]; then + echo "Setting mem-factor to 20 since mount type is nfs..." 
+ ssh -l root $MGMT_NODE "$GF_BIN/gluster volume set $VOLNAME nfs.mem-factor 20" + fi +} + +function cleanup_brick() +{ + ssh -l root $1 "killall glusterd glusterfs glusterfsd > /dev/null 2>&1; rm -rf /etc/glusterd" + + if [ "$SERVER_EXPORT_DIR" != "" ] + then + echo "Deleting export dir $SERVER_EXPORT_DIR on $brick..." + ssh -l root $1 "rm -rf $SERVER_EXPORT_DIR" + else + echo "************* Empty export dir in config *************" + exit + fi +} + +echo "Cleaning bricks..." +for brick in $BRICK_IP_ADDRS +do + cleanup_brick $brick & +done + +wait + +echo "Creating export directory & flushing firewall rules." +for brick in $BRICK_IP_ADDRS +do + ssh -l root $brick "mkdir -p $SERVER_EXPORT_DIR" + ssh -l root $brick "iptables -F" +done + +# Start glusterd on all the bricks +echo "Starting glusterd..." +for brick in $BRICK_IP_ADDRS +do + if [ $ENABLE_MEM_ACCT == "yes" ] + then + ssh -l root $brick "GLUSTERFS_DISABLE_MEM_ACCT=0 $GF_BIN/glusterd" + else + ssh -l root $brick "$GF_BIN/glusterd" + fi +done + +# Create the gluster volume according to the config + +bricklist="" +count=0 +for brick in $BRICK_IP_ADDRS +do + bricklist+="$brick:$SERVER_EXPORT_DIR " + count=$((count+1)) +done + +if [ "$GF_CONFIG" == "replicate" -a "$count" -gt "2" ] +then + echo "NOTE:" + echo "Replicate volume with more than 2 bricks created." + echo "This will result in a distributed replicate config." +fi + +echo "Creating $GF_CONFIG volume..." +if [ "$GF_CONFIG" == "distribute" ] +then + mgmt_vol_create "gluster volume create $VOLNAME $bricklist" +elif [ "$GF_CONFIG" == "replicate" ] +then + mgmt_vol_create "gluster volume create $VOLNAME replica 2 $bricklist" +elif [ "$GF_CONFIG" == "distrep" ] +then + mgmt_vol_create "gluster volume create $VOLNAME replica 2 $bricklist" +elif [ "$GF_CONFIG" == "stripe" ] +then + mgmt_vol_create "gluster volume create $VOLNAME stripe 4 $bricklist" +else + echo "Unknown configuration. Exiting..." + exit 1 +fi + +sleep 5 + +if [ $ENABLE_MEM_ACCT == "yes" ] +then + for brick in $BRICK_IP_ADDRS + do + echo "Memory accounting status on : $brick -" + ssh -l root $brick "\ + echo \"x/x &gf_mem_acct_enable\" > gf_gdb_commands;\ + echo \"quit\" >> gf_gdb_commands;\ + gdb -q --command=gf_gdb_commands -p \`pidof glusterfsd\` 2> /dev/null | \ + grep gf_mem_acct_enable | awk '{print \$(NF-1) \$NF}';\ + rm gf_gdb_commands > /dev/null 2>&1" + echo "" + done +fi diff --git a/perf-framework/deploy_gluster b/perf-framework/deploy_gluster new file mode 100755 index 0000000..49543f7 --- /dev/null +++ b/perf-framework/deploy_gluster @@ -0,0 +1,53 @@ +#!/bin/bash + +function prepare_server() +{ + ip_addr=$1 + ssh -l root $ip_addr mkdir -p $SERVER_BUILD_DIR + scp -p $RELEASE_TARBALL $BUILD_SCRIPT root@$ip_addr:$SERVER_BUILD_DIR + ssh -l root $ip_addr "cd $SERVER_BUILD_DIR && tar xf $RELEASE_TARBALL" + ssh -l root $ip_addr "cd $SERVER_BUILD_DIR/$release_dir && ../$BUILD_SCRIPT" +} + +function prepare_client() +{ + mkdir -p $CLIENT_BUILD_DIR + cp $RELEASE_TARBALL $BUILD_SCRIPT $CLIENT_BUILD_DIR + cd $CLIENT_BUILD_DIR && tar xf $RELEASE_TARBALL + cd $CLIENT_BUILD_DIR/$release_dir && ../$BUILD_SCRIPT +} + + +CONFIG_FILE=gf_perf_config +BUILD_SCRIPT=buildit +source $CONFIG_FILE + +if [ "$RELEASE_TARBALL" == "" ] +then + echo "ERROR: Empty release tarball" + exit 1 +fi + +if [ ! 
-f $RELEASE_TARBALL ] +then + echo "ERROR: Unable to find source tarball $RELEASE_TARBALL" + exit 1 +fi + +release_dir=`tar tvf $RELEASE_TARBALL | head -1 | awk '{print $NF}'` + +# Build gluster on the bricks +for brick in `echo $BRICK_IP_ADDRS` +do + echo "Starting build on server $brick..." + prepare_server $brick & +done + +echo "Waiting for jobs to finish..." +sleep 5 + +# Build gluster on the client +echo "Starting build on client..." +prepare_client & + +wait diff --git a/perf-framework/diff_perfrun b/perf-framework/diff_perfrun new file mode 100755 index 0000000..65ea316 --- /dev/null +++ b/perf-framework/diff_perfrun @@ -0,0 +1,17 @@ +#!/bin/bash + +echo "" +echo "Q - Quota ON, q - Quota OFF, G - Gsync ON, g - Gsync OFF" +echo "" +echo "======================================================================" +echo "OPERATIONS q-g q-G Q-G Q-g" +echo "======================================================================" +for i in `cat /tmp/ops` +do + printf "%-25s" $i + for j in 2 3 4 5 + do + printf "%10.1f" `echo \`grep -w ^$i /tmp/$1 | awk -v col=$j '{print $col}'\` | awk '{print ($2 - $1)*100/$1}' | tr '\n' '\t'` + done + echo "" +done diff --git a/perf-framework/file_open_analyse b/perf-framework/file_open_analyse new file mode 100755 index 0000000..f7c98f5 --- /dev/null +++ b/perf-framework/file_open_analyse @@ -0,0 +1,430 @@ +#!/bin/bash -u + +source gf_perf_config + +if [ $# -le 0 -o $# -gt 2 ] +then + echo "Usage : $0 <run directory>" + exit 1 +fi + +rundir=$1 +cmpdir="" + +if [ $# -eq 2 ] +then + cmpdir=$2 +fi + +function gen_cpu_data() +{ + time=0 + sum=0 + + num_procs=`grep -w ^processor $SYSINFO | wc -l` + echo `grep idle $MPSTAT_LOG | head -1 | awk '{print $NF}'` | grep -o idle + idle_col_tweak=$? + echo `grep CPU $MPSTAT_LOG | head -1 | awk '{print $3}'` | grep -o CPU > /dev/null + cpu_col_tweak=$? 
+ for i in "all" 0 `seq $((num_procs-1))` + do + egrep -v 'Linux|^$|idle' $MPSTAT_LOG | awk -v v1=$cpu_col_tweak -v v2=$idle_col_tweak '{print $(3-v1) " " $(NF-v2)}' | grep -w ^$i | cut -f 2 -d' '| sed 's/^/100-/g' | bc -l > cpu-$i; + cat -n cpu-$i > cpu-$i.new + done +} + +function plot_cpu_usage() +{ + mpstat_interval=5 + plot_info=pinfo.$$ + num_procs=`grep -w ^processor $SYSINFO | wc -l` + ltype=2 + identity=$1 + + for i in 0 `seq $((num_procs-1))` + do + echo -ne "\"cpu-$i.new\" using (\$1*$mpstat_interval):2 title 'cpu-$i' with lines lt $ltype lw 2,\\c" >> $plot_info + ltype=$((ltype+1)) + done + echo -ne "\"cpu-all.new\" using (\$1*$mpstat_interval):2 title 'cpu-all' with lines lt 1 lw 2" >> $plot_info + + gnuplot <<EOF + set autoscale + set grid + set title "CPU utilization : All CPUs ($identity)" + set xlabel "Time" + set ylabel "% CPU utilization" + set yr [0:100] + set terminal png nocrop size 1024,768 + set output "$CPU_PLOT_OUTPUT" + plot `cat $plot_info` +EOF + rm $plot_info +} + +function gen_vm_data() +{ + egrep -v 'memory|free|^$' $VMSTAT_LOG | awk '{print $4}' > vm_datafile + totalmem=`grep -w ^MemTotal $SYSINFO | awk '{print $2}'` + cat vm_datafile | sed "s/^/$totalmem-/g" | bc > memfile + cat -n memfile > memfile.new +} + +function plot_vm_usage() +{ + vmstat_interval=5 + total_mem=`grep -w ^MemTotal $SYSINFO | awk '{print $2}'` + identity=$1 + + gnuplot <<EOF + set autoscale + set grid + set title "Memory utilization ($identity)" + set xlabel "Time" + set ylabel "Memory utilization in bytes" + set yr [0:$total_mem] + set terminal png nocrop size 1024,768 + set output "$VM_PLOT_OUTPUT" + plot "memfile.new" using (\$1*$vmstat_interval):2 title 'memory-usage' with lines lt 2 lw 2 +EOF +} + +function gen_iostats() +{ + brick=$1 + dev=`ssh $brick "df -h $SERVER_EXPORT_DIR" | tail -1 | awk '{print $1}' | cut -f3 -d'/'` + egrep -v 'Device|^$' $IOSTAT_LOG |grep -w ^$dev | awk '{print $10}' | cut -f1 -d'.' > io_await + cat -n io_await > io_await.new + egrep -v 'Device|^$' $IOSTAT_LOG |grep -w ^$dev | awk '{print $6}' | cut -f1 -d'.' > read_tput + cat -n read_tput > read_tput.new + egrep -v 'Device|^$' $IOSTAT_LOG |grep -w ^$dev | awk '{print $7}' | cut -f1 -d'.' 
> write_tput + cat -n write_tput > write_tput.new +} + +function plot_iostats() +{ + iostat_interval=5 + max_wait=$((`sort -n io_await | tail -1` + 50)) + max_read=$(((`sort -n read_tput | tail -1`) / 2 + 100)) + max_write=$(((`sort -n write_tput | tail -1`) / 2 + 100)) + max_io=$max_write + if [ $max_read -gt $max_write ] + then + max_io=$max_read; + fi + identity=$1 + + gnuplot <<EOF1 + set autoscale + set grid + set title "IO Wait times ($identity)" + set xlabel "Time in seconds" + set ylabel "IO Wait times in milliseconds" + set yr [0:$max_wait] + set terminal png nocrop size 1024,768 + set output "$IO_TIMES_PLOT_OUTPUT" + plot "io_await.new" using (\$1*$iostat_interval):2 title 'IO wait times' with lines lt 3 lw 2 +EOF1 + gnuplot <<EOF2 + set autoscale + set grid + set title "Disk Read-Write throughput ($identity)" + set xlabel "Time in seconds" + set ylabel "Throughput in KB/sec" + set yr [0:$max_io] + set terminal png nocrop size 1024,768 + set output "$IO_TPUT_PLOT_OUTPUT" + plot "read_tput.new" using (\$1*$iostat_interval):(\$2/2) title 'Read throughput' with lines lt 4 lw 2,\ + "write_tput.new" using (\$1*$iostat_interval):(\$2/2) title 'Write throughput' with lines lt 5 lw 2 +EOF2 +} + +function gen_cmp_data() +{ + perflog_baseline=$1 + perflog_current=$2 + + time=0 + for op in `cat ops` + do + time=0; + for i in `grep -w ^$op $perflog_baseline | awk '{print $2}'| cut -f1 -d'.'` + do + time=$((time+$i)) + done; + time=$((time/3)) # Average over three runs + echo $time >> btimes.$$ + done + + for op in `cat ops` + do + time=0; + for i in `grep -w ^$op $perflog_current | awk '{print $2}'| cut -f1 -d'.'` + do + time=$((time+$i)) + done; + time=$((time/3)) # Average over three runs + echo $time >> ctimes.$$ + done + + paste -d " " ops btimes.$$ ctimes.$$ > $CMP_DATAFILE + rm btimes.$$ ctimes.$$ +} + +function plot_comparison() +{ + a=`cat $CMP_DATAFILE | awk '{print $2"\n"$3}' | sort -n | tail -1` + yrange=`echo $a + $a/5 | bc` + b=`wc -l $CMP_DATAFILE | awk '{print $1}'` + xrange=`echo $b - 0.5 | bc` + + gnuplot <<EOF + reset + set key at graph 0.15, 0.85 horizontal samplen 0.1 + set style data histogram + set style histogram cluster gap 1 + set style fill solid border -1 + set boxwidth 0.8 + set xtic rotate by 90 scale 0 + unset ytics + set y2tics rotate by 90 + set terminal png nocrop size 1024,768 + set xlabel ' ' + set size 0.6, 1 + set yrange [0:$yrange]; set xrange [-0.5:$xrange] + set y2label '$XLABEL' offset -2 + set label 1 '$YLABEL' at graph 0.5, -0.4 centre rotate by 180 + set label 2 '$LEGEND_A' at graph 0.05, 0.85 left rotate by 90 + set label 3 '$LEGEND_B' at graph 0.12, 0.85 left rotate by 90 + set label 4 '$PLOT_TITLE' at graph -0.01, 0.5 center rotate by 90 + set output "tmp.$$.png" + p '$CMP_DATAFILE' u 2 title ' ', '' u 3 title ' ', '' u 0:(0):xticlabel(1) w l title '' +EOF + convert tmp.$$.png -rotate 90 $CMP_PLOT_OUTPUT + rm tmp.$$.png +} + +function gen_intr_csw_stats() +{ + egrep -v 'memory|free|^$' $VMSTAT_LOG | awk '{print $11}' > intrstat + cat -n intrstat > intrstat.new + egrep -v 'memory|free|^$' $VMSTAT_LOG | awk '{print $12}' > cswstat + cat -n cswstat > cswstat.new +} + +function plot_intr_csw_stats() +{ + vmstat_interval=5 + max_intr=$((`sort -n intrstat | tail -1` + 100)) + max_csw=$((`sort -n cswstat | tail -1` + 100)) + max_val=$max_csw + if [ $max_intr -gt $max_csw ] + then + max_val=$max_intr; + fi + identity=$1 + + gnuplot <<EOF + set autoscale + set grid + set title "Interrupts and context switches ($identity)" + set xlabel "Time in seconds" + 
set ylabel "Interrupts/Context Switches" + set yr [0:$max_val] + set terminal png nocrop size 1024,768 + set output "$INTR_CSW_PLOT_OUTPUT" + plot "intrstat.new" using (\$1*$vmstat_interval):2 title 'Interrupts' with lines lt 4 lw 2,\ + "cswstat.new" using (\$1*$vmstat_interval):2 title 'Context Switches' with lines lt 5 lw 2 +EOF +} + +function gen_netstats() +{ + ip_addr=$1 + dev=`ssh $ip_addr ifconfig | grep -B1 $ip_addr | head -1 | cut -f1 -d' '` + egrep -v 'IFACE|^$' $SAR_NETSTAT_LOG |grep -w $dev | awk '{print $3}' | cut -f1 -d'.' > rpkts + cat -n rpkts > rpkts.new + egrep -v 'IFACE|^$' $SAR_NETSTAT_LOG |grep -w $dev | awk '{print $4}' | cut -f1 -d'.' > wpkts + cat -n wpkts > wpkts.new + egrep -v 'IFACE|^$' $SAR_NETSTAT_LOG |grep -w $dev | awk '{print $5}' | cut -f1 -d'.' > rkbytes + cat -n rkbytes > rkbytes.new + egrep -v 'IFACE|^$' $SAR_NETSTAT_LOG |grep -w $dev | awk '{print $6}' | cut -f1 -d'.' > wkbytes + cat -n wkbytes > wkbytes.new +} + +function plot_netstats() +{ + sar_netstat_interval=5 + max_read_pkts=$((`sort -n rpkts | tail -1` + 50)) + max_write_pkts=$((`sort -n wpkts | tail -1` + 50)) + max_read_kbytes=$(((`sort -n rkbytes | tail -1`)/1024 + 100)) + max_write_kbytes=$(((`sort -n wkbytes | tail -1`)/1024 + 100)) + + max_pkts=$max_write_pkts + if [ $max_read_pkts -gt $max_write_pkts ] + then + max_pkts=$max_read_pkts; + fi + + max_kbytes=$max_write_kbytes + if [ $max_read_kbytes -gt $max_write_kbytes ] + then + max_kbytes=$max_read_kbytes; + fi + identity=$1 + + gnuplot <<EOF1 + set autoscale + set grid + set title "Network statistics - Packet Read/Write ($identity)" + set xlabel "Time in seconds" + set ylabel "Number of Packets" + set yr [0:$max_pkts] + set terminal png nocrop size 1024,768 + set output "$NET_PKTS_PLOT_OUTPUT" + plot "rpkts.new" using (\$1*$sar_netstat_interval):2 title 'Read Packets' with lines lt 3 lw 2,\ + "wpkts.new" using (\$1*$sar_netstat_interval):2 title 'Write Packets' with lines lt 4 lw 2 +EOF1 + gnuplot <<EOF2 + set autoscale + set grid + set title "Network Read-Write throughput ($identity)" + set xlabel "Time in seconds" + set ylabel "Throughput in KB/sec" + set yr [0:$max_kbytes] + set terminal png nocrop size 1024,768 + set output "$NET_TPUT_PLOT_OUTPUT" + plot "rkbytes.new" using (\$1*$sar_netstat_interval):(\$2/1024) title 'Read throughput' with lines lt 3 lw 2,\ + "wkbytes.new" using (\$1*$sar_netstat_interval):(\$2/1024) title 'Write throughput' with lines lt 4 lw 2 +EOF2 +} + +function analyse_plot_data() +{ + identity=$1 + + if [ $identity != "client" ] + then + brick=$2 + fi + # Generate CPU data + gen_cpu_data + # plot CPU data + plot_cpu_usage $identity + + # Generate VM data + gen_vm_data + # plot VM data + plot_vm_usage $identity + + if [ $identity != "client" ] + then + # Generate io-times and io-throughput data + # This makes sense only for the bricks since the client is not involved in disk IO + gen_iostats $brick + # plot io-times and io-throughput data + plot_iostats $identity + fi + + # Generate interrupt and context switch data + gen_intr_csw_stats + # plot interrupt and context switch data + plot_intr_csw_stats $identity + + # Generate network packet statistics and throughput data + if [ $identity != "client" ] + then + gen_netstats $brick + else + gen_netstats $CLIENT_IP_ADDR + fi + # Generate network packet statistics and throughput data + plot_netstats $identity + + # cleanup tmp files + rm vm_datafile memfile* + rm cpu-* + if [ $identity != "client" ] + then + rm io_await* read_tput* write_tput* + fi + rm cswstat* 
intrstat* + rm rpkts* wpkts* rkbytes* wkbytes* +} + +function analyse_client_data() +{ + MPSTAT_LOG=$rundir/client/mpstat_log + VMSTAT_LOG=$rundir/client/vmstat_log + IOSTAT_LOG=$rundir/client/iostat_log + SAR_NETSTAT_LOG=$rundir/client/sar_netstat_log + SYSINFO=$rundir/client/sysinfo + PERFLOG=$rundir/client/perf-test.log + CPU_PLOT_OUTPUT=$rundir/client/cpu.png + VM_PLOT_OUTPUT=$rundir/client/vm.png + IO_TIMES_PLOT_OUTPUT=$rundir/client/io_times.png + IO_TPUT_PLOT_OUTPUT=$rundir/client/io_tput.png + INTR_CSW_PLOT_OUTPUT=$rundir/client/intr_csw.png + NET_PKTS_PLOT_OUTPUT=$rundir/client/net-pkts.png + NET_TPUT_PLOT_OUTPUT=$rundir/client/net-tput.png + identity="client" + analyse_plot_data $identity +} + +function analyse_brick_data() +{ + ind=0 + for b in $BRICK_IP_ADDRS + do + ind=$((ind+1)) + MPSTAT_LOG=$rundir/brick$ind*mpstat-log + VMSTAT_LOG=$rundir/brick$ind*vmstat-log + IOSTAT_LOG=$rundir/brick$ind*iostat-log + SAR_NETSTAT_LOG=$rundir/brick$ind-*sar_netstat-log + SYSINFO=$rundir/brick$ind*sysinfo-log + PERFLOG=$rundir/client/perf-test.log + CPU_PLOT_OUTPUT=$rundir/brick$ind-cpu.png + VM_PLOT_OUTPUT=$rundir/brick$ind-vm.png + IO_TIMES_PLOT_OUTPUT=$rundir/brick$ind-io_times.png + IO_TPUT_PLOT_OUTPUT=$rundir/brick$ind-io_tput.png + INTR_CSW_PLOT_OUTPUT=$rundir/brick$ind-intr_csw.png + NET_PKTS_PLOT_OUTPUT=$rundir/brick$ind-net-pkts.png + NET_TPUT_PLOT_OUTPUT=$rundir/brick$ind-net-tput.png + + identity="brick$ind" + analyse_plot_data $identity $b + done +} + +function do_comparison() +{ + # Generate comparison data + gen_cmp_data $1 $2 + + # plot perf comparison + plot_comparison +} + +cp ops $LOCAL_LOG_REPO +cd $LOCAL_LOG_REPO +analyse_client_data +analyse_brick_data +rm ops + +if [ "$cmpdir" != "" ] +then + CMP_PLOT_OUTPUT=$cmpdir-$rundir-cmp.png + CMP_DATAFILE=$rundir/client/cmp-with-$cmpdir-data.dat + + XLABEL="Time in seconds" + YLABEL="Operations" + LEGEND_A="$cmpdir" + LEGEND_B="$rundir" + PLOT_TITLE="Performance comparison - $cmpdir vs $rundir" + do_comparison $cmpdir/client/perf-test.log $rundir/client/perf-test.log +fi + +# Create tarball of the plots +echo "Creating plots.tar..." +cd $rundir +tar cf plots.tar *.png client/*.png diff --git a/perf-framework/file_open_test b/perf-framework/file_open_test new file mode 100755 index 0000000..83cbe97 --- /dev/null +++ b/perf-framework/file_open_test @@ -0,0 +1,159 @@ +#!/bin/bash -u + +CONFIG_FILE=gf_perf_config +source $CONFIG_FILE + +SETTLE_TIME=10 +RUNFILE=.runfile +STAT_COLLECTOR=stat_collect + +# Generate current run, update runfile +if [ ! -f $RUNFILE ] +then + run=1 +else + run=`cat $RUNFILE` +fi +echo $((run+1)) > $RUNFILE + +# Drop vm caches on all the bricks before starting the runs +for brick in $BRICK_IP_ADDRS +do + ssh -l root $brick "echo 3 > /proc/sys/vm/drop_caches" +done + +# Create the gluster volume +./create_gluster_vol + +mount | grep $MOUNT_POINT > /dev/null 2>&1 +if [ $? -eq 0 ] +then + umount $MOUNT_POINT +fi + +if [ ! -d $MOUNT_POINT ] +then + mkdir -p $MOUNT_POINT +fi + +# Make sure that the fuse kernel module is loaded +/sbin/lsmod | grep -w fuse > /dev/null 2>&1 +if [ $? -ne 0 ] +then + /sbin/modprobe fuse > /dev/null 2>&1 +fi + +ps -eaf | egrep -w 'glusterfs|glusterfsd|glusterd' > /dev/null 2>&1 +if [ $? -eq 0 ] +then + killall glusterfsd glusterd glusterfs > /dev/null 2>&1 +fi + +# Mount the client +# Sleep for a while. 
Sometimes, NFS mounts fail if attempted soon after creating the volume +sleep $SETTLE_TIME + +if [ $ENABLE_ACL == "yes" ] +then + acl_opts="-o acl" +else + acl_opts="" +fi + +echo "Mounting volume..." +if [ $ENABLE_MEM_ACCT == "yes" ] +then + echo "Memory accounting status on client -" + echo "x/x &gf_mem_acct_enable" > commands.$$ + echo "quit" >> commands.$$ + GLUSTERFS_DISABLE_MEM_ACCT=0 mount -t $MOUNT_TYPE $acl_opts $MGMT_NODE:$VOLNAME $MOUNT_POINT + mount_status=$? + gdb -q --command=commands.$$ -p `pidof glusterfs` | grep gf_mem_acct_enable | awk '{print $(NF-1) $NF}' + rm commands.$$ > /dev/null 2>&1 +else + mount -t $MOUNT_TYPE $acl_opts $MGMT_NODE:$VOLNAME $MOUNT_POINT + mount_status=$? +fi + +if [ $mount_status -ne 0 ] +then + echo "mount -t $MOUNT_TYPE $acl_opts $MGMT_NODE:$VOLNAME $MOUNT_POINT failed..." + echo "Exiting..." + exit 1 +fi + +# Copy statistics collection scripts to the server + +echo "" +echo "Copying stat collection script to bricks..." +for brick in $BRICK_IP_ADDRS +do + ssh -l root $brick "mkdir -p $SERVER_SCRIPTS_DIR" + scp -p $STAT_COLLECTOR root@$brick:$SERVER_SCRIPTS_DIR > /dev/null 2>&1 +done + +# Run statistics collection scripts on the server + +echo "" +echo "Starting server stat collection..." +for brick in $BRICK_IP_ADDRS +do + ssh -l root $brick "mkdir -p $SERVER_LOG_DIR" + ssh -l root $brick "$SERVER_SCRIPTS_DIR/$STAT_COLLECTOR $SERVER_LOG_DIR" & +done + +# Run statistics collection on client + +mkdir -p $LOCAL_LOG_REPO +./$STAT_COLLECTOR $LOCAL_LOG_REPO/run$run/client & + +# Start perf test + +echo "" +echo "Starting run $run..." + +sleep $SETTLE_TIME +./parallel_create 50 10000 +sleep $SETTLE_TIME + +# Stop statistics collection scripts on the client + +killall mpstat vmstat iostat $STAT_COLLECTOR sar > /dev/null 2>&1 + +# Stop statistics collection scripts on the server + +echo "" +echo "Stopping server stat collection..." +for brick in $BRICK_IP_ADDRS +do + ssh -l root $brick killall mpstat vmstat iostat $STAT_COLLECTOR sar > /dev/null 2>&1 +done + +# Since the ssh processes were backgrounded, they will be hanging around. +# Kill them +kill `jobs -l | awk '{print $2}'` > /dev/null 2>&1 + +# Copy statistics from the server + +echo "" +echo "Copying server logfiles for run $run..." +cur_log_dump_dir=$LOCAL_LOG_REPO/run$run +mkdir -p $cur_log_dump_dir +count=1 +for brick in $BRICK_IP_ADDRS +do + for statf in mpstat vmstat iostat sysinfo sar_netstat + do + scp root@$brick:$SERVER_LOG_DIR/*$statf* $cur_log_dump_dir/brick$count-$brick-$statf-log > /dev/null 2>&1 + done + count=$((count + 1)) +done + +# Cleanup statistics collected for this run on the server + +echo "" +echo "Cleaning server logfiles..." 
+for brick in $BRICK_IP_ADDRS +do + ssh -l root $brick "cd $SERVER_LOG_DIR; rm mpstat_log vmstat_log iostat_log sysinfo sar_netstat_log" +done diff --git a/perf-framework/gen_perf_data b/perf-framework/gen_perf_data new file mode 100755 index 0000000..31110f6 --- /dev/null +++ b/perf-framework/gen_perf_data @@ -0,0 +1,191 @@ +#!/bin/bash -u + +MPSTAT_LOG=mpstat_log1908 +VMSTAT_LOG=vmstat_log1908 +SYSINFO=sysinfo1908 +PERFLOG=perf-fuse-ami.log + +source gnuplotrc + +function gen_cpu_data() +{ + time=0 + sum=0 + + if [ -f times ] + then + rm times + fi + + for op in `cat ops` + do + time=0; + for i in `grep -w ^$op $PERFLOG | awk '{print $2}'| cut -f1 -d'.'` + do + time=$((time+$i)) + done; + time=$((time/3)) # Average over three runs + sum=$((sum + $time)) + echo $sum >> times + done + sed -i 's/$/ 200/g' times + + num_procs=`grep -w ^processor $SYSINFO | wc -l` + echo `grep idle $MPSTAT_LOG | head -1 | awk '{print $NF}'` | grep -o idle + col_tweak=$? + for i in "all" 0 `seq $((num_procs-1))` + do + egrep -v 'Linux|^$|idle' $MPSTAT_LOG | awk -v v1=$col_tweak '{print $3 " " $(NF-v1)}' | grep -w ^$i | cut -f 2 -d' '| sed 's/^/100-/g' | bc -l > cpu-$i; + cat -n cpu-$i > cpu-$i.new + done +} + +function plot_cpu_usage() +{ + xrange=$((`tail -1 times | awk '{print $1}'`+50)) + MPSTAT_INTERVAL=5 + plot_info=pinfo.$$ + num_procs=`grep -w ^processor $SYSINFO | wc -l` + ltype=1 + + for i in "all" 0 `seq $((num_procs-1))` + do + echo -ne "\"cpu-$i.new\" using (\$1*$MPSTAT_INTERVAL):2 title 'cpu-$i' with lines lt $ltype lw 2,\\c" >> $plot_info + ltype=$((ltype+1)) + done + echo "\"times\" using 1:2 title '' with impulse lt 2 lw 1 lc rgb \"#FF0000\"" >> $plot_info + + gnuplot <<EOF + set autoscale + set grid + set title "CPU utilization : All CPUs" + set xlabel "Time" + set ylabel "% CPU utilization" + set xr [0:$xrange] + set yr [0:100] + set terminal png nocrop size 1024,768 + set output "$CPU_PLOT_OUTPUT" + plot `cat $plot_info` +EOF + rm $plot_info +} + +function gen_vm_data() +{ + egrep -v 'memory|free' $VMSTAT_LOG | awk '{print $4}' > vm_datafile + totalmem=`grep -w ^MemTotal $SYSINFO | awk '{print $2}'` + cat vm_datafile | sed "s/^/$totalmem-/g" | bc > memfile +} + +function plot_vm_usage() +{ + vmstat_interval=5 + total_mem=`grep -w ^MemTotal $SYSINFO | awk '{print $2}'` + xrange=$((`tail -1 times | awk '{print $1}'`+50)) + + gnuplot <<EOF + set autoscale + set grid + set title "Memory utilization" + set xlabel "Time" + set ylabel "Memory utilization in bytes" + set xr [0:$xrange] + set yr [0:$total_mem] + set terminal png nocrop size 1024,768 + set output "$VM_PLOT_OUTPUT" + plot "vm_datafile" using 1 title 'free-memory' with lines lt 1 lw 2, \ + "memfile" using 1 title 'memory-usage' with lines lt 2 lw 2,\ + "times" using 1:(\$2*$total_mem/100) title '' with impulse lt 2 lw 1 lc rgb "#FF0000" +EOF +} + +function gen_cmp_data() +{ + perflog_baseline=$1 + perflog_current=$2 + + time=0 + for op in `cat ops` + do + time=0; + for i in `grep -w ^$op $perflog_baseline | awk '{print $2}'| cut -f1 -d'.'` + do + time=$((time+$i)) + done; + time=$((time/3)) # Average over three runs + echo $time >> btimes.$$ + done + + for op in `cat ops` + do + time=0; + for i in `grep -w ^$op $perflog_current | awk '{print $2}'| cut -f1 -d'.'` + do + time=$((time+$i)) + done; + time=$((time/3)) # Average over three runs + echo $time >> ctimes.$$ + done + + paste -d " " ops btimes.$$ ctimes.$$ > $CMP_DATAFILE + rm btimes.$$ ctimes.$$ +} + +function plot_comparison() +{ + a=`cat $CMP_DATAFILE | awk '{print $2"\n"$3}' | 
sort -n | tail -1` + yrange=`echo $a + $a/5 | bc` + b=`wc -l $CMP_DATAFILE | awk '{print $1}'` + xrange=`echo $b - 0.5 | bc` + + gnuplot <<EOF + reset + set key at graph 0.15, 0.85 horizontal samplen 0.1 + set style data histogram + set style histogram cluster gap 1 + set style fill solid border -1 + set boxwidth 0.8 + set xtic rotate by 90 scale 0 + unset ytics + set y2tics rotate by 90 + set terminal png nocrop size 1024,768 + set xlabel ' ' + set size 0.6, 1 + set yrange [0:$yrange]; set xrange [-0.5:$xrange] + set y2label '$XLABEL' offset -2 + set label 1 '$YLABEL' at graph 0.5, -0.4 centre rotate by 180 + set label 2 '$LEGEND_A' at graph 0.05, 0.85 left rotate by 90 + set label 3 '$LEGEND_B' at graph 0.12, 0.85 left rotate by 90 + set label 4 '$PLOT_TITLE' at graph -0.01, 0.5 center rotate by 90 + set output "tmp.$$.png" + p '$CMP_DATAFILE' u 2 title ' ', '' u 3 title ' ', '' u 0:(0):xticlabel(1) w l title '' +EOF + convert tmp.$$.png -rotate 90 $CMP_PLOT_OUTPUT + rm tmp.$$.png +} + +# Generate CPU data +gen_cpu_data + +# plot CPU data +plot_cpu_usage + +# Generate VM data +gen_vm_data + +# plot VM data +plot_vm_usage + +# Generate comparison data - baseline vs current run +gen_cmp_data perf-fuse-ami.log perf-fuse-aws.log + +# plot perf comparison - baseline vs current run +plot_comparison + +# cleanup tmp files +rm vm_datafile memfile +rm cpu-* times + +display cpu.png & +display vm.png & +display cmp.png & diff --git a/perf-framework/get_date b/perf-framework/get_date new file mode 100755 index 0000000..335359d --- /dev/null +++ b/perf-framework/get_date @@ -0,0 +1,5 @@ +date +%s +ssh -l root 10.1.10.30 "date +%s" +ssh -l root 10.1.10.31 "date +%s" +ssh -l root 10.1.10.35 "date +%s" +ssh -l root 10.1.10.36 "date +%s" diff --git a/perf-framework/gf_perf_config b/perf-framework/gf_perf_config new file mode 100644 index 0000000..017feb8 --- /dev/null +++ b/perf-framework/gf_perf_config @@ -0,0 +1,34 @@ +# Common config variables +RELEASE_TARBALL=glusterfs-3git.tar.gz +BRICK_IP_ADDRS="10.1.10.30 10.1.10.31 10.1.10.35 10.1.10.36" +GF_CONFIG="stripe" +VOLNAME="testvol" +MGMT_NODE="10.1.10.30" + +# Server specific config variables +SERVER_BUILD_DIR=/data/rahul/gluster_builds +SERVER_LOG_DIR=/data/rahul/PERF/server_logs +SERVER_SCRIPTS_DIR=/data/rahul/PERF/scripts +SERVER_EXPORT_DIR=/data/rahul/datastore + +# Client specific config variables +MOUNT_TYPE="glusterfs" +#MOUNT_TYPE="nfs" +MOUNT_POINT=/mnt/perfmount +LOCAL_LOG_REPO=/root/rahul/test_deploy/log_repo +CLIENT_BUILD_DIR=/root/rahul/test_deploy/gluster_builds +CLIENT_IP_ADDR=10.1.10.29 +RUNLOG=$LOCAL_LOG_REPO/runlog + +# Special runs +ENABLE_ACL=yes +ENABLE_MEM_ACCT=no + +# For passwordless ssh setup +ROOT_PW1=abc123 +ROOT_PW2=abc321 +# SSH_HOSTS is a space separated list of hosts +SSH_HOSTS="0" + +# Setup automated emails +EMAIL="rahul@shell.gluster.com:/home/rahul/perf_logs"
\ No newline at end of file diff --git a/perf-framework/glusterfs-precreate.sh b/perf-framework/glusterfs-precreate.sh new file mode 100755 index 0000000..d1c92a3 --- /dev/null +++ b/perf-framework/glusterfs-precreate.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -e; + +if [ $# -eq 0 ]; then + echo "Usage: $0 <dir1> [dir2 [dir3 [dir4 ...]]]"; + exit 1 +fi + +for dir in "$@"; do + if [ ! -d "$dir" ]; then + echo "$0: $dir not a directory" + exit 1; + fi +done + +subdirs="{00" +for i in {1..255}; do + n=$(printf "%02x" $i); + subdirs="$subdirs,$n"; +done +subdirs="$subdirs}" + +mkdir -v "$dir/.glusterfs"; + +for dir in $@; do + for i in {0..255}; do + n=$(printf "%02x" $i); + mkdir -v "$dir/.glusterfs/$n" + eval "mkdir -v $dir/.glusterfs/$n/$subdirs" + done +done
\ No newline at end of file diff --git a/perf-framework/motd b/perf-framework/motd new file mode 100644 index 0000000..dd934b4 --- /dev/null +++ b/perf-framework/motd @@ -0,0 +1,25 @@ +/* + * System being used by Rahul for performance runs. + * It is part of the setup described below. + * Please contact tcp@gluster.com before running anything on this system. + * + * DO NOT DISTURB THE SETUP SINCE IT MIGHT AFFECT THE PERFORMANCE RUNS. + * + * /etc/motd last updated: + * Mon Jul 4 04:04:29 PDT 2011 + */ + ++-------------+ +-------------+ +-------------+ +-------------+ +| 10.1.10.30 |<---->| 10.1.10.31 |<---->| 10.1.10.35 |<---->| 10.1.10.36 | +| (client-10) | | (client-11) | | (client-15) | | (client-16) | +| (Brick-1) | | (Brick-2) | | (Brick-3) | | (Brick-4) | ++------+------+ +------+------+ +------+------+ +------+------+ + | | | | + +--------------------+---------+----------+--------------------+ + | + v + +--------------+ + | 10.1.10.29 | + | (client-9) | + | (client) | + +--------------+ diff --git a/perf-framework/newtest.sh b/perf-framework/newtest.sh new file mode 100755 index 0000000..789c784 --- /dev/null +++ b/perf-framework/newtest.sh @@ -0,0 +1,331 @@ +#!/bin/bash + + +function emptyfiles_create() +{ + mkdir -p $PG/emptyfiles; + for i in $(seq 1 $smallfilecount); do + : > $PG/emptyfiles/file.$i; + done +} + + +function emptyfiles_delete() +{ + rm -rf $PG/emptyfiles; +} + + +function emptydirs_create() +{ + mkdir -p $PG/emptydirs; + + eval "echo $PG/emptydirs/top.{1..$emptytops}" | xargs mkdir -p; + for top in $(seq 1 $emptytops); do + eval "echo $PG/emptydirs/top.$top/dir.{1..$emptydirs}" | xargs mkdir -p; + done +} + + +function emptydirs_delete() +{ + rm -rf $PG/emptydirs; +} + + +function smallfiles_create() +{ + mkdir -p $PG/smallfiles; + for i in $(seq 1 $smallfilecount); do + echo -n $smallblob >$PG/smallfiles/file.$i + done +} + + +function smallfiles_rewrite() +{ + smallfiles_create "$@"; +} + + +function smallfiles_read() +{ + for i in $(seq 1 $smallfilecount); do + cat $PG/smallfiles/file.$i > /dev/null + done +} + + +function smallfiles_reread() +{ + smallfiles_read "$@"; +} + + +function smallfiles_delete() +{ + rm -rf $PG/smallfiles; +} + + +function largefile_create() +{ + mkdir -p $PG/largefile; + dd if=/dev/zero of=$PG/largefile/large_file bs=$largeblock count=$largecount 2>/dev/null; +} + + +function largefile_rewrite() +{ + largefile_create "$@"; +} + + +function largefile_read() +{ + dd if=$PG/largefile/large_file of=/dev/null bs=$largeblock count=$largecount 2>/dev/null; +} + + +function largefile_reread() +{ + largefile_read "$@"; +} + + +function largefile_delete() +{ + rm -rf $PG/largefile +} + + +function crawl_create_recurse() +{ + local subpath; + local depth; + + subpath="$1"; + depth="$2"; + + if [ $depth -eq 0 ]; then + for i in $(seq 1 $leafcount); do + : > $subpath/file.$i; + done + return + fi + + depth=$(($depth - 1)); + + eval "echo $subpath/dir.{1..$crawlwidth}" | xargs mkdir -p; + + for i in $(seq 1 $crawlwidth); do + crawl_create_recurse "$subpath/dir.$i" $depth; + done +} + + +function directory_crawl_create () +{ + crawl_create_recurse "$PG/crawl" $crawldepth; +} + + +function directory_crawl() +{ + ls -lR "$PG/crawl" >/dev/null +} + + +function directory_recrawl() +{ + directory_crawl "$@"; +} + + +function directory_crawl_delete() +{ + rm -rf "$PG/crawl"; +} + + +function metadata_modify () +{ + chmod -R 777 "$PG/crawl"; + chown -R 1234 "$PG/crawl"; +} + + +function run_tests() +{ + #run emptyfiles_create; + #run emptyfiles_delete; + + 
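+	# Note: in this variant of the test only the directory-crawl phases at
+	# the end of run_tests() are enabled; the empty-file, small-file and
+	# large-file workloads are left commented out so individual phases can
+	# be toggled per run.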
+ # run emptydirs_create; + # run emptydirs_delete; + + + #run smallfiles_create; + #run smallfiles_rewrite; + #echo 3 > /proc/sys/vm/drop_caches; + #sleep 10; + #run smallfiles_read; + #run smallfiles_reread; + #run smallfiles_delete; + + + #run largefile_create; + #run largefile_rewrite; + #echo 3 > /proc/sys/vm/drop_caches; + #sleep 10; + #run largefile_read; + #run largefile_reread; + #run largefile_delete; + + + run directory_crawl_create; + echo 3 > /proc/sys/vm/drop_caches; + sleep 10; + run directory_crawl; + run directory_recrawl; + run metadata_modify; + run directory_crawl_delete; +} + + +##################################################### +############ Framework code ######################### +##################################################### + + +function cleanup_playground() +{ + rm -rvf $PG; + mkdir -p $PG; +} + + +function params_init() +{ + emptytops=10; + emptydirs=10000; + + smallfilecount=100000; + smallblock=4096; + + largeblock=64K; + largecount=16K; + + crawlwidth=10; + crawldepth=3; + leafcount=100; + + smallblob=; + for i in $(seq 1 $smallblock); do + smallblob=a$smallblob + done +} + + +function _init() +{ + params_init; + + TSTDOUT=251 + TSTDERR=252 + LOGFD=253 + LOGFILE=/tmp/perf$$ + + eval "exec $TSTDOUT>&1" + eval "exec $TSTDERR>&2" + eval "exec $LOGFD<>$LOGFILE"; +} + + +function parse_cmdline() +{ + MOUNT=; + + if [ "x$1" == "x" ] ; then + echo "Usage: $0 /gluster/mount" + exit 1 + fi + + MOUNT="$1"; + PG=$MOUNT/playground; +} + + +function wrap() +{ + "$@" 1>&$TSTDOUT 2>&$TSTDERR; +} + + +function measure() +{ + set -o pipefail; + (time -p wrap "$@") 2>&1 >/dev/null | tail -n 3 | head -n 1 | cut -f2 -d' ' +} + + +function log() +{ + local t; + local rest; + + t=$1; + shift; + rest="$@"; + + echo "$rest $t" >&$LOGFD; +} + + +function run() +{ + local t; + + echo -n "running $@ ... " + t=$(measure "$@"); + + if [ $? -eq 0 ]; then + echo "done ($t secs)"; + log "$t" "$@"; + else + echo "FAILED!!!" + fi +} + + +function verify_mount() +{ + if [ ! 
-d "$MOUNT" ] ; then + echo "Can't access '$MOUNT'" + exit 1 + fi +} + + +function show_report() +{ + (echo "Testname Time"; cat $LOGFILE) | column -t; + rm -f $LOGFILE; +} + + +function main() +{ + parse_cmdline "$@"; + + verify_mount; + + cleanup_playground; + + run_tests; + + show_report; +} + + +_init && main "$@" diff --git a/perf-framework/nfs-reread.sh b/perf-framework/nfs-reread.sh new file mode 100755 index 0000000..1e2f51b --- /dev/null +++ b/perf-framework/nfs-reread.sh @@ -0,0 +1,331 @@ +#!/bin/bash + + +function emptyfiles_create() +{ + mkdir -p $PG/emptyfiles; + for i in $(seq 1 $smallfilecount); do + : > $PG/emptyfiles/file.$i; + done +} + + +function emptyfiles_delete() +{ + rm -rf $PG/emptyfiles; +} + + +function emptydirs_create() +{ + mkdir -p $PG/emptydirs; + + eval "echo $PG/emptydirs/top.{1..$emptytops}" | xargs mkdir -p; + for top in $(seq 1 $emptytops); do + eval "echo $PG/emptydirs/top.$top/dir.{1..$emptydirs}" | xargs mkdir -p; + done +} + + +function emptydirs_delete() +{ + rm -rf $PG/emptydirs; +} + + +function smallfiles_create() +{ + mkdir -p $PG/smallfiles; + for i in $(seq 1 $smallfilecount); do + echo -n $smallblob >$PG/smallfiles/file.$i + done +} + + +function smallfiles_rewrite() +{ + smallfiles_create "$@"; +} + + +function smallfiles_read() +{ + for i in $(seq 1 $smallfilecount); do + cat $PG/smallfiles/file.$i > /dev/null + done +} + + +function smallfiles_reread() +{ + smallfiles_read "$@"; +} + + +function smallfiles_delete() +{ + rm -rf $PG/smallfiles; +} + + +function largefile_create() +{ + mkdir -p $PG/largefile; + dd if=/dev/zero of=$PG/largefile/large_file bs=$largeblock count=$largecount 2>/dev/null; +} + + +function largefile_rewrite() +{ + largefile_create "$@"; +} + + +function largefile_read() +{ + dd if=$PG/largefile/large_file of=/dev/null bs=$largeblock count=$largecount 2>/dev/null; +} + + +function largefile_reread() +{ + largefile_read "$@"; +} + + +function largefile_delete() +{ + rm -rf $PG/largefile +} + + +function crawl_create_recurse() +{ + local subpath; + local depth; + + subpath="$1"; + depth="$2"; + + if [ $depth -eq 0 ]; then + for i in $(seq 1 $leafcount); do + : > $subpath/file.$i; + done + return + fi + + depth=$(($depth - 1)); + + eval "echo $subpath/dir.{1..$crawlwidth}" | xargs mkdir -p; + + for i in $(seq 1 $crawlwidth); do + crawl_create_recurse "$subpath/dir.$i" $depth; + done +} + + +function directory_crawl_create () +{ + crawl_create_recurse "$PG/crawl" $crawldepth; +} + + +function directory_crawl() +{ + ls -lR "$PG/crawl" >/dev/null +} + + +function directory_recrawl() +{ + directory_crawl "$@"; +} + + +function directory_crawl_delete() +{ + rm -rf "$PG/crawl"; +} + + +function metadata_modify () +{ + chmod -R 777 "$PG/crawl"; + chown -R 1234 "$PG/crawl"; +} + + +function run_tests() +{ + #run emptyfiles_create; + #run emptyfiles_delete; + + + # run emptydirs_create; + # run emptydirs_delete; + + + run smallfiles_create; + run smallfiles_rewrite; + echo 3 > /proc/sys/vm/drop_caches; + sleep 10; + run smallfiles_read; + run smallfiles_reread; + run smallfiles_delete; + + + #run largefile_create; + #run largefile_rewrite; + #echo 3 > /proc/sys/vm/drop_caches; + #sleep 10; + #run largefile_read; + #run largefile_reread; + #run largefile_delete; + + + #run directory_crawl_create; + #echo 3 > /proc/sys/vm/drop_caches; + #sleep 10; + #run directory_crawl; + #run directory_recrawl; + #run metadata_modify; + #run directory_crawl_delete; +} + + +##################################################### +############ 
Framework code ######################### +##################################################### + + +function cleanup_playground() +{ + rm -rvf $PG; + mkdir -p $PG; +} + + +function params_init() +{ + emptytops=10; + emptydirs=10000; + + smallfilecount=100000; + smallblock=4096; + + largeblock=64K; + largecount=16K; + + crawlwidth=10; + crawldepth=3; + leafcount=100; + + smallblob=; + for i in $(seq 1 $smallblock); do + smallblob=a$smallblob + done +} + + +function _init() +{ + params_init; + + TSTDOUT=251 + TSTDERR=252 + LOGFD=253 + LOGFILE=/tmp/perf$$ + + eval "exec $TSTDOUT>&1" + eval "exec $TSTDERR>&2" + eval "exec $LOGFD<>$LOGFILE"; +} + + +function parse_cmdline() +{ + MOUNT=; + + if [ "x$1" == "x" ] ; then + echo "Usage: $0 /gluster/mount" + exit 1 + fi + + MOUNT="$1"; + PG=$MOUNT/playground; +} + + +function wrap() +{ + "$@" 1>&$TSTDOUT 2>&$TSTDERR; +} + + +function measure() +{ + set -o pipefail; + (time -p wrap "$@") 2>&1 >/dev/null | tail -n 3 | head -n 1 | cut -f2 -d' ' +} + + +function log() +{ + local t; + local rest; + + t=$1; + shift; + rest="$@"; + + echo "$rest $t" >&$LOGFD; +} + + +function run() +{ + local t; + + echo -n "running $@ ... " + t=$(measure "$@"); + + if [ $? -eq 0 ]; then + echo "done ($t secs)"; + log "$t" "$@"; + else + echo "FAILED!!!" + fi +} + + +function verify_mount() +{ + if [ ! -d "$MOUNT" ] ; then + echo "Can't access '$MOUNT'" + exit 1 + fi +} + + +function show_report() +{ + (echo "Testname Time"; cat $LOGFILE) | column -t; + rm -f $LOGFILE; +} + + +function main() +{ + parse_cmdline "$@"; + + verify_mount; + + cleanup_playground; + + run_tests; + + show_report; +} + + +_init && main "$@" diff --git a/perf-framework/nfs_delete.sh b/perf-framework/nfs_delete.sh new file mode 100755 index 0000000..a6cb5bc --- /dev/null +++ b/perf-framework/nfs_delete.sh @@ -0,0 +1,331 @@ +#!/bin/bash + + +function emptyfiles_create() +{ + mkdir -p $PG/emptyfiles; + for i in $(seq 1 $smallfilecount); do + : > $PG/emptyfiles/file.$i; + done +} + + +function emptyfiles_delete() +{ + rm -rf $PG/emptyfiles; +} + + +function emptydirs_create() +{ + mkdir -p $PG/emptydirs; + + eval "echo $PG/emptydirs/top.{1..$emptytops}" | xargs mkdir -p; + for top in $(seq 1 $emptytops); do + eval "echo $PG/emptydirs/top.$top/dir.{1..$emptydirs}" | xargs mkdir -p; + done +} + + +function emptydirs_delete() +{ + rm -rf $PG/emptydirs; +} + + +function smallfiles_create() +{ + mkdir -p $PG/smallfiles; + for i in $(seq 1 $smallfilecount); do + echo -n $smallblob >$PG/smallfiles/file.$i + done +} + + +function smallfiles_rewrite() +{ + smallfiles_create "$@"; +} + + +function smallfiles_read() +{ + for i in $(seq 1 $smallfilecount); do + cat $PG/smallfiles/file.$i > /dev/null + done +} + + +function smallfiles_reread() +{ + smallfiles_read "$@"; +} + + +function smallfiles_delete() +{ + rm -rf $PG/smallfiles; +} + + +function largefile_create() +{ + mkdir -p $PG/largefile; + dd if=/dev/zero of=$PG/largefile/large_file bs=$largeblock count=$largecount 2>/dev/null; +} + + +function largefile_rewrite() +{ + largefile_create "$@"; +} + + +function largefile_read() +{ + dd if=$PG/largefile/large_file of=/dev/null bs=$largeblock count=$largecount 2>/dev/null; +} + + +function largefile_reread() +{ + largefile_read "$@"; +} + + +function largefile_delete() +{ + rm -rf $PG/largefile +} + + +function crawl_create_recurse() +{ + local subpath; + local depth; + + subpath="$1"; + depth="$2"; + + if [ $depth -eq 0 ]; then + for i in $(seq 1 $leafcount); do + : > $subpath/file.$i; + done + return + fi + 
+ depth=$(($depth - 1)); + + eval "echo $subpath/dir.{1..$crawlwidth}" | xargs mkdir -p; + + for i in $(seq 1 $crawlwidth); do + crawl_create_recurse "$subpath/dir.$i" $depth; + done +} + + +function directory_crawl_create () +{ + crawl_create_recurse "$PG/crawl" $crawldepth; +} + + +function directory_crawl() +{ + ls -lR "$PG/crawl" >/dev/null +} + + +function directory_recrawl() +{ + directory_crawl "$@"; +} + + +function directory_crawl_delete() +{ + rm -rf "$PG/crawl"; +} + + +function metadata_modify () +{ + chmod -R 777 "$PG/crawl"; + chown -R 1234 "$PG/crawl"; +} + + +function run_tests() +{ + run emptyfiles_create; + #run emptyfiles_delete; + + + # run emptydirs_create; + # run emptydirs_delete; + + + #run smallfiles_create; + #run smallfiles_rewrite; + #echo 3 > /proc/sys/vm/drop_caches; + #sleep 10; + #run smallfiles_read; + #run smallfiles_reread; + #run smallfiles_delete; + + + #run largefile_create; + #run largefile_rewrite; + #echo 3 > /proc/sys/vm/drop_caches; + #sleep 10; + #run largefile_read; + #run largefile_reread; + #run largefile_delete; + + + #run directory_crawl_create; + #echo 3 > /proc/sys/vm/drop_caches; + #sleep 10; + #run directory_crawl; + #run directory_recrawl; + #run metadata_modify; + #run directory_crawl_delete; +} + + +##################################################### +############ Framework code ######################### +##################################################### + + +function cleanup_playground() +{ + rm -rvf $PG; + mkdir -p $PG; +} + + +function params_init() +{ + emptytops=10; + emptydirs=10000; + + smallfilecount=80000; + smallblock=4096; + + largeblock=64K; + largecount=16K; + + crawlwidth=10; + crawldepth=3; + leafcount=100; + + smallblob=; + for i in $(seq 1 $smallblock); do + smallblob=a$smallblob + done +} + + +function _init() +{ + params_init; + + TSTDOUT=251 + TSTDERR=252 + LOGFD=253 + LOGFILE=/tmp/perf$$ + + eval "exec $TSTDOUT>&1" + eval "exec $TSTDERR>&2" + eval "exec $LOGFD<>$LOGFILE"; +} + + +function parse_cmdline() +{ + MOUNT=; + + if [ "x$1" == "x" ] ; then + echo "Usage: $0 /gluster/mount" + exit 1 + fi + + MOUNT="$1"; + PG=$MOUNT/playground; +} + + +function wrap() +{ + "$@" 1>&$TSTDOUT 2>&$TSTDERR; +} + + +function measure() +{ + set -o pipefail; + (time -p wrap "$@") 2>&1 >/dev/null | tail -n 3 | head -n 1 | cut -f2 -d' ' +} + + +function log() +{ + local t; + local rest; + + t=$1; + shift; + rest="$@"; + + echo "$rest $t" >&$LOGFD; +} + + +function run() +{ + local t; + + echo -n "running $@ ... " + t=$(measure "$@"); + + if [ $? -eq 0 ]; then + echo "done ($t secs)"; + log "$t" "$@"; + else + echo "FAILED!!!" + fi +} + + +function verify_mount() +{ + if [ ! 
-d "$MOUNT" ] ; then + echo "Can't access '$MOUNT'" + exit 1 + fi +} + + +function show_report() +{ + (echo "Testname Time"; cat $LOGFILE) | column -t; + rm -f $LOGFILE; +} + + +function main() +{ + parse_cmdline "$@"; + + verify_mount; + + cleanup_playground; + + run_tests; + + show_report; +} + + +_init && main "$@" diff --git a/perf-framework/ops b/perf-framework/ops new file mode 100644 index 0000000..74e1939 --- /dev/null +++ b/perf-framework/ops @@ -0,0 +1,17 @@ +emptyfiles_create +emptyfiles_delete +smallfiles_create +smallfiles_rewrite +smallfiles_read +smallfiles_reread +smallfiles_delete +largefile_create +largefile_rewrite +largefile_read +largefile_reread +largefile_delete +directory_crawl_create +directory_crawl +directory_recrawl +metadata_modify +directory_crawl_delete diff --git a/perf-framework/parallel_create b/perf-framework/parallel_create new file mode 100755 index 0000000..0bd9406 --- /dev/null +++ b/perf-framework/parallel_create @@ -0,0 +1,71 @@ +#!/bin/bash + +FILE_REPO="/mnt/perfmount/file_repo" +TEST=0 +DD_BS=512 +RAND_SCALE=20 + +function usage() +{ + echo "usage: $1 <thread_count> <file_count>" + exit 1 +} + +function create_files() +{ + tid=$1 + count=$2 + start=$((tid*count)) + + if [ $TEST -eq "1" ]; then + for ((i=0; i<${count}; i++)); do + outfile=$FILE_REPO/tid${tid}_file$((start+i)) + echo "dd if=/dev/urandom of=$outfile bs=1b count=$((RANDOM*20))" + done + else + for ((i=0; i<${count}; i++)); do + filesize=$((RANDOM*RAND_SCALE)) + dd_count=$((filesize/DD_BS)) + outfile=$FILE_REPO/tid${tid}_file$((start+i)) + dd if=/dev/zero of=$outfile bs=1b count=$dd_count > /dev/null 2>&1 + echo "$tid $i" + done + fi +} + +function Not_A_Number() +{ + echo $1$2 | grep ^[0-9]*$ > /dev/null + return $? +} + +function main() +{ + if [ $# -ne 2 ]; then + usage $0 + fi + + if ! Not_A_Number $1 $2; then + echo "Invalid input. Enter only numbers." 
+ usage + fi + + if [ $2 -lt $1 ]; then + echo "Number of files cannot be less than number of threads" + usage $0 + fi + + mkdir -p $FILE_REPO + + nthr=$1 + fc=$2 + files_per_thr=$((fc/nthr)) + + for i in `seq 0 $((nthr-1))`; do + create_files $i $files_per_thr & + done + + wait +} + +main $@ diff --git a/perf-framework/perf-test.sh b/perf-framework/perf-test.sh new file mode 100755 index 0000000..6ce5b55 --- /dev/null +++ b/perf-framework/perf-test.sh @@ -0,0 +1,337 @@ +#!/bin/bash + +CONFIG_FILE=gf_perf_config +source $CONFIG_FILE + +function emptyfiles_create() +{ + mkdir -p $PG/emptyfiles; + for i in $(seq 1 $smallfilecount); do + : > $PG/emptyfiles/file.$i; + done +} + + +function emptyfiles_delete() +{ + rm -rf $PG/emptyfiles; +} + + +function emptydirs_create() +{ + mkdir -p $PG/emptydirs; + + eval "echo $PG/emptydirs/top.{1..$emptytops}" | xargs mkdir -p; + for top in $(seq 1 $emptytops); do + eval "echo $PG/emptydirs/top.$top/dir.{1..$emptydirs}" | xargs mkdir -p; + done +} + + +function emptydirs_delete() +{ + rm -rf $PG/emptydirs; +} + + +function smallfiles_create() +{ + mkdir -p $PG/smallfiles; + for i in $(seq 1 $smallfilecount); do + echo -n $smallblob >$PG/smallfiles/file.$i + done +} + + +function smallfiles_rewrite() +{ + smallfiles_create "$@"; +} + + +function smallfiles_read() +{ + for i in $(seq 1 $smallfilecount); do + cat $PG/smallfiles/file.$i > /dev/null + done +} + + +function smallfiles_reread() +{ + smallfiles_read "$@"; +} + + +function smallfiles_delete() +{ + rm -rf $PG/smallfiles; +} + + +function largefile_create() +{ + mkdir -p $PG/largefile; + dd if=/dev/zero of=$PG/largefile/large_file bs=$largeblock count=$largecount 2>/dev/null; +} + + +function largefile_rewrite() +{ + largefile_create "$@"; +} + + +function largefile_read() +{ + dd if=$PG/largefile/large_file of=/dev/null bs=$largeblock count=$largecount 2>/dev/null; +} + + +function largefile_reread() +{ + largefile_read "$@"; +} + + +function largefile_delete() +{ + rm -rf $PG/largefile +} + + +function crawl_create_recurse() +{ + local subpath; + local depth; + + subpath="$1"; + depth="$2"; + + if [ $depth -eq 0 ]; then + for i in $(seq 1 $leafcount); do + : > $subpath/file.$i; + done + return + fi + + depth=$(($depth - 1)); + + eval "echo $subpath/dir.{1..$crawlwidth}" | xargs mkdir -p; + + for i in $(seq 1 $crawlwidth); do + crawl_create_recurse "$subpath/dir.$i" $depth; + done +} + + +function directory_crawl_create () +{ + crawl_create_recurse "$PG/crawl" $crawldepth; +} + + +function directory_crawl() +{ + ls -lR "$PG/crawl" >/dev/null +} + + +function directory_recrawl() +{ + directory_crawl "$@"; +} + + +function directory_crawl_delete() +{ + rm -rf "$PG/crawl"; +} + + +function metadata_modify () +{ + chmod -R 777 "$PG/crawl"; + chown -R 1234 "$PG/crawl"; +} + + +function run_tests() +{ + run emptyfiles_create; + run emptyfiles_delete; + + + # run emptydirs_create; + # run emptydirs_delete; + + + run smallfiles_create; + run smallfiles_rewrite; + echo 3 > /proc/sys/vm/drop_caches; + sleep 10; + run smallfiles_read; + run smallfiles_reread; + run smallfiles_delete; + + + run largefile_create; + run largefile_rewrite; + echo 3 > /proc/sys/vm/drop_caches; + sleep 10; + run largefile_read; + run largefile_reread; + run largefile_delete; + + + run directory_crawl_create; + echo 3 > /proc/sys/vm/drop_caches; + sleep 10; + run directory_crawl; + run directory_recrawl; + run metadata_modify; + run directory_crawl_delete; +} + + +##################################################### +############ 
Framework code ######################### +##################################################### + + +function cleanup_playground() +{ + rm -rvf $PG; + mkdir -p $PG; +} + + +function params_init() +{ + emptytops=10; + emptydirs=10000; + + if [ $MOUNT_TYPE == "nfs" ]; then + smallfilecount=85000; + else + smallfilecount=100000; + fi + smallblock=4096; + + largeblock=64K; + largecount=16K; + + crawlwidth=10; + crawldepth=3; + leafcount=100; + + smallblob=; + for i in $(seq 1 $smallblock); do + smallblob=a$smallblob + done +} + + +function _init() +{ + params_init; + + TSTDOUT=251 + TSTDERR=252 + LOGFD=253 + LOGFILE=/tmp/perf$$ + + eval "exec $TSTDOUT>&1" + eval "exec $TSTDERR>&2" + eval "exec $LOGFD<>$LOGFILE"; +} + + +function parse_cmdline() +{ + MOUNT=; + + if [ "x$1" == "x" ] ; then + echo "Usage: $0 /gluster/mount" + exit 1 + fi + + MOUNT="$1"; + PG=$MOUNT/playground; +} + + +function wrap() +{ + "$@" 1>&$TSTDOUT 2>&$TSTDERR; +} + + +function measure() +{ + set -o pipefail; + (time -p wrap "$@") 2>&1 >/dev/null | tail -n 3 | head -n 1 | cut -f2 -d' ' +} + + +function log() +{ + local t; + local rest; + + t=$1; + shift; + rest="$@"; + + echo "$rest $t" >&$LOGFD; +} + + +function run() +{ + local t; + + echo -n "running $@ ... " + t=$(measure "$@"); + + if [ $? -eq 0 ]; then + echo "done ($t secs)"; + log "$t" "$@"; + else + echo "FAILED!!!" + fi +} + + +function verify_mount() +{ + if [ ! -d "$MOUNT" ] ; then + echo "Can't access '$MOUNT'" + exit 1 + fi +} + + +function show_report() +{ + (echo "Testname Time"; cat $LOGFILE) | column -t; + rm -f $LOGFILE; +} + + +function main() +{ + parse_cmdline "$@"; + + verify_mount; + + cleanup_playground; + + run_tests; + + show_report; +} + + +_init && main "$@" diff --git a/perf-framework/perf.sh b/perf-framework/perf.sh new file mode 100755 index 0000000..1a4bdc6 --- /dev/null +++ b/perf-framework/perf.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +perf_test () +{ + ./perf-test.sh $mount_point >> $logfile +} + +_init () +{ + mount_point=$1; + logfile=$2 +} + +main () +{ + echo "" > $logfile + perf_test; + perf_test; + perf_test; +} + + +_init "$@" && main "$@"; diff --git a/perf-framework/pwl_ssh b/perf-framework/pwl_ssh new file mode 100755 index 0000000..fc837f1 --- /dev/null +++ b/perf-framework/pwl_ssh @@ -0,0 +1,50 @@ +#!/usr/bin/expect -f + +proc setup_pwl_ssh { host pw1 pw2 } { + spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@$host + set timeout 50 + expect { + "yes/no" { + send "yes\r" + exp_continue + } "password: " { + send "$pw1\r" + expect { + "#" { + # Success. Nothing to do + } "password: " { + send "$pw2\r"; + expect { + "#" { + # Success. Nothing to do + } (.*) { + catch {close -i $spawn_id} + wait -nowait -i $spawn_id + } + } + } + } + } timeout { + catch {close -i $spawn_id} + wait -nowait -i $spawn_id + } eof { + } + } +} + +if { $argc < 2 } { + puts "usage $argv0 <logfile> <list-of-hosts>" + exit +} + +log_user 0 +set logfile [ lindex $argv 0 ] +exp_log_file -a -noappend $logfile + +set pw1 $env(ROOT_PW1) +set pw2 $env(ROOT_PW2) + +for {set i 1} {$i<[llength $argv]} {incr i} { + set host [lindex $argv $i] + setup_pwl_ssh $host $pw1 $pw2 +} diff --git a/perf-framework/quota_gsync_run b/perf-framework/quota_gsync_run new file mode 100755 index 0000000..a090b66 --- /dev/null +++ b/perf-framework/quota_gsync_run @@ -0,0 +1,42 @@ +#!/bin/bash -u + +CONFIG_FILE=gf_perf_config +source $CONFIG_FILE + +run=$1 +runlabel=`echo $run|sed -e 's/^glusterfs-//' -e 's/\.tar\.gz//'` + +a=`cat .runfile` +echo "run$a - Quota off; gsync on..." 
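+# Each of the three configurations below follows the same pattern: reset the
+# volume options, toggle quota and/or geo-replication indexing, record a
+# label in $RUNLOG and run ./start_perf_measure. An illustrative $RUNLOG
+# entry (values illustrative only):
+#   run12 - glusterfs - 3git - stripe - (quota off, gsync on)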
+ssh -l root $MGMT_NODE "gluster volume reset $VOLNAME force" +sleep 5 +ssh -l root $MGMT_NODE "gluster volume set $VOLNAME geo-replication.indexing on" +sleep 5 +echo "run`cat .runfile` - $MOUNT_TYPE - $runlabel - $GF_CONFIG - (quota off, gsync on)" >> $RUNLOG +./start_perf_measure +sleep 10 + +echo "run$((a+1)) - Quota on; gsync on..." +ssh -l root $MGMT_NODE "gluster volume reset $VOLNAME force" +sleep 5 +ssh -l root $MGMT_NODE "gluster volume quota $VOLNAME enable" +sleep 5 +ssh -l root $MGMT_NODE "gluster volume set $VOLNAME geo-replication.indexing on" +sleep 5 +echo "run`cat .runfile` - $MOUNT_TYPE - $runlabel - $GF_CONFIG - (quota on, gsync on)" >> $RUNLOG +./start_perf_measure +sleep 10 + +echo "run$((a+2)) - Quota on; gsync off..." +ssh -l root $MGMT_NODE "gluster volume reset $VOLNAME force" +sleep 5 +ssh -l root $MGMT_NODE "gluster volume quota $VOLNAME enable" +sleep 5 +echo "run`cat .runfile` - $MOUNT_TYPE - $runlabel - $GF_CONFIG - (quota on, gsync off)" >> $RUNLOG +./start_perf_measure +sleep 10 + +# Restore +ssh -l root $MGMT_NODE "gluster volume reset $VOLNAME force" +sleep 5 + diff --git a/perf-framework/remove_export_dirs b/perf-framework/remove_export_dirs new file mode 100755 index 0000000..c84d881 --- /dev/null +++ b/perf-framework/remove_export_dirs @@ -0,0 +1,10 @@ +#!/bin/bash -ue + +SERVER_EXPORT_DIR=$1 + +if [ "$SERVER_EXPORT_DIR" != "" -a "$SERVER_EXPORT_DIR" != "/" ] +then + echo "$SERVER_EXPORT_DIR" +else + echo "Empty export dir" +fi diff --git a/perf-framework/run b/perf-framework/run new file mode 100755 index 0000000..965d7bb --- /dev/null +++ b/perf-framework/run @@ -0,0 +1,9 @@ +#!/bin/bash -u + +source gf_perf_config + +for brick in $BRICK_IP_ADDRS +do + echo $brick + ssh -l root $brick "$@" +done diff --git a/perf-framework/send_mail b/perf-framework/send_mail new file mode 100755 index 0000000..e44f29b --- /dev/null +++ b/perf-framework/send_mail @@ -0,0 +1,8 @@ +#!/bin/bash + +CONFIG_FILE=gf_perf_config +source $CONFIG_FILE + +./calc_avg "$@" > /tmp/run_all + +scp /tmp/run_all $EMAIL/run_all; diff --git a/perf-framework/setbatch b/perf-framework/setbatch new file mode 100755 index 0000000..fb18585 --- /dev/null +++ b/perf-framework/setbatch @@ -0,0 +1,25 @@ +#!/bin/bash -u + +TARBALL_DIR=./tarballs +function usage() +{ + echo "Usage : $0 <gluster_release_tarball_list>" + exit 1 +} + +if [ $# -lt 1 ] +then + usage +fi + +list=`echo $@` +for i in $list +do + if [ ! -f $TARBALL_DIR/$i ] + then + echo "$i not found in directory $TARBALL_DIR. Provide correct list." + exit + fi +done + +sed -i "s/^RUNLIST=.*$/RUNLIST=\"$list\"/" batchrun diff --git a/perf-framework/setrun b/perf-framework/setrun new file mode 100755 index 0000000..7992051 --- /dev/null +++ b/perf-framework/setrun @@ -0,0 +1,14 @@ +#!/bin/bash -u + +function usage() +{ + echo "Usage : $0 <gluster_release_tarball>" + exit 1 +} + +if [ $# -ne 1 ] +then + usage +fi + +sed -i "s/^RELEASE_TARBALL=.*$/RELEASE_TARBALL=$1/" gf_perf_config diff --git a/perf-framework/start_perf_measure b/perf-framework/start_perf_measure new file mode 100755 index 0000000..2e06db5 --- /dev/null +++ b/perf-framework/start_perf_measure @@ -0,0 +1,166 @@ +#!/bin/bash -u + +CONFIG_FILE=gf_perf_config +source $CONFIG_FILE + +SETTLE_TIME=10 +RUNFILE=.runfile +STAT_COLLECTOR=stat_collect + +# Generate current run, update runfile +if [ ! 
-f $RUNFILE ] +then + run=1 +else + run=`cat $RUNFILE` +fi +echo $((run+1)) > $RUNFILE + +# Drop vm caches on all the bricks before starting the runs +for brick in $BRICK_IP_ADDRS +do + ssh -l root $brick "echo 3 > /proc/sys/vm/drop_caches" +done + +# Create the gluster volume +./create_gluster_vol + +mount | grep $MOUNT_POINT > /dev/null 2>&1 +if [ $? -eq 0 ] +then + umount $MOUNT_POINT +fi + +if [ ! -d $MOUNT_POINT ] +then + mkdir -p $MOUNT_POINT +fi + +# Make sure that the fuse kernel module is loaded +/sbin/lsmod | grep -w fuse > /dev/null 2>&1 +if [ $? -ne 0 ] +then + /sbin/modprobe fuse > /dev/null 2>&1 +fi + +ps -eaf | egrep -w 'glusterfs|glusterfsd|glusterd' > /dev/null 2>&1 +if [ $? -eq 0 ] +then + killall glusterfsd glusterd glusterfs > /dev/null 2>&1 +fi + +# Mount the client +# Sleep for a while. Sometimes, NFS mounts fail if attempted soon after creating the volume +sleep $SETTLE_TIME + +if [ $ENABLE_ACL == "yes" ] +then + acl_opts="-o acl" +else + acl_opts="" +fi + +if [ $MOUNT_TYPE == "nfs" ] +then + nfs_opts="-o vers=3" +else + nfs_opts="" +fi + +echo "Mounting volume..." +if [ $ENABLE_MEM_ACCT == "yes" ] +then + echo "Memory accounting status on client -" + echo "x/x &gf_mem_acct_enable" > commands.$$ + echo "quit" >> commands.$$ + GLUSTERFS_DISABLE_MEM_ACCT=0 mount -t $MOUNT_TYPE $acl_opts $nfs_opts $MGMT_NODE:$VOLNAME $MOUNT_POINT + mount_status=$? + gdb -q --command=commands.$$ -p `pidof glusterfs` | grep gf_mem_acct_enable | awk '{print $(NF-1) $NF}' + rm commands.$$ > /dev/null 2>&1 +else + mount -t $MOUNT_TYPE $acl_opts $nfs_opts $MGMT_NODE:$VOLNAME $MOUNT_POINT + mount_status=$? +fi + +if [ $mount_status -ne 0 ] +then + echo "mount -t $MOUNT_TYPE $acl_opts $nfs_opts $MGMT_NODE:$VOLNAME $MOUNT_POINT failed..." + echo "Exiting..." + exit 1 +fi + +# Copy statistics collection scripts to the server + +echo "" +echo "Copying stat collection script to bricks..." +for brick in $BRICK_IP_ADDRS +do + ssh -l root $brick "mkdir -p $SERVER_SCRIPTS_DIR" + scp -p $STAT_COLLECTOR root@$brick:$SERVER_SCRIPTS_DIR > /dev/null 2>&1 +done + +# Run statistics collection scripts on the server + +echo "" +echo "Starting server stat collection..." +for brick in $BRICK_IP_ADDRS +do + ssh -l root $brick "mkdir -p $SERVER_LOG_DIR" + ssh -l root $brick "$SERVER_SCRIPTS_DIR/$STAT_COLLECTOR $SERVER_LOG_DIR" & +done + +# Run statistics collection on client + +mkdir -p $LOCAL_LOG_REPO +./$STAT_COLLECTOR $LOCAL_LOG_REPO/run$run/client & + +# Start perf test + +echo "" +echo "Starting run $run..." + +sleep $SETTLE_TIME +./perf.sh $MOUNT_POINT $LOCAL_LOG_REPO/run$run/client/perf-test.log +sleep $SETTLE_TIME + +# Stop statistics collection scripts on the client + +killall mpstat vmstat iostat $STAT_COLLECTOR sar > /dev/null 2>&1 + +# Stop statistics collection scripts on the server + +echo "" +echo "Stopping server stat collection..." +for brick in $BRICK_IP_ADDRS +do + ssh -l root $brick killall mpstat vmstat iostat $STAT_COLLECTOR sar > /dev/null 2>&1 +done + +# Since the ssh processes were backgrounded, they will be hanging around. +# Kill them +kill `jobs -l | awk '{print $2}'` > /dev/null 2>&1 + +# Copy statistics from the server + +echo "" +echo "Copying server logfiles for run $run..." 
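+# The per-brick log names written below (brick<N>-<ip>-<stat>-log) are the
+# ones the plotting script later globs for (e.g. brick$ind*vmstat-log in
+# analyse_brick_data), so the two naming schemes have to stay in sync.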
+cur_log_dump_dir=$LOCAL_LOG_REPO/run$run +mkdir -p $cur_log_dump_dir +count=1 +for brick in $BRICK_IP_ADDRS +do + for statf in mpstat vmstat iostat sysinfo sar_netstat + do + scp root@$brick:$SERVER_LOG_DIR/*$statf* $cur_log_dump_dir/brick$count-$brick-$statf-log > /dev/null 2>&1 + done + count=$((count + 1)) +done + +# Cleanup statistics collected for this run on the server + +echo "" +echo "Cleaning server logfiles..." +for brick in $BRICK_IP_ADDRS +do + ssh -l root $brick "cd $SERVER_LOG_DIR; rm mpstat_log vmstat_log iostat_log sysinfo sar_netstat_log" +done diff --git a/perf-framework/stat_collect b/perf-framework/stat_collect new file mode 100755 index 0000000..8b63203 --- /dev/null +++ b/perf-framework/stat_collect @@ -0,0 +1,36 @@ +#!/bin/bash -ue + +function start_stat_collect() +{ + echo "Interrupts details:" >> $SYSINFO + cat /proc/interrupts >> $SYSINFO + echo "CPU details:" >> $SYSINFO + cat /proc/cpuinfo >> $SYSINFO + echo "Memory details:" >> $SYSINFO + cat /proc/meminfo >> $SYSINFO + + iostat -xcdh 5 >> $IOSTAT_LOG & + mpstat -P ALL 5 >> $MPSTAT_LOG & + vmstat 5 >> $VMSTAT_LOG & + LC_TIME="POSIX" sar -n DEV 5 0 >> $SAR_NETSTAT_LOG & +} + +function init() +{ + mkdir -p $LOGDIR + IOSTAT_LOG=$LOGDIR/iostat_log + VMSTAT_LOG=$LOGDIR/vmstat_log + MPSTAT_LOG=$LOGDIR/mpstat_log + SAR_NETSTAT_LOG=$LOGDIR/sar_netstat_log + SYSINFO=$LOGDIR/sysinfo + echo "" > $IOSTAT_LOG + echo "" > $VMSTAT_LOG + echo "" > $MPSTAT_LOG + echo "" > $SAR_NETSTAT_LOG + echo "" > $SYSINFO +} + +LOGDIR=$1 +init $LOGDIR; +start_stat_collect; +wait diff --git a/sanity/nightly_sanity/nightly_updated.sh b/sanity/nightly_sanity/nightly_updated.sh index 2f7ead1..c9644e5 100755 --- a/sanity/nightly_sanity/nightly_updated.sh +++ b/sanity/nightly_sanity/nightly_updated.sh @@ -792,10 +792,17 @@ function post_run() clean_results; } +# Eventhough statedump is an important information, lets not stop the tests +# because of some failure in statedump. Instead lets just log a message +# saying statedump was not taken and continue with the tests. + function take_statedump () { + set +e; local dir; + local ret_value; + ret_value=0; echo 3 > /proc/sys/vm/drop_caches; sleep 2; @@ -806,6 +813,14 @@ function take_statedump () kill -USR1 $BRICK_PID; sleep 1; mv /tmp/*.$BRICK_PID.dump $dir; + if [ $? -ne 0 ]; then + # probably its on release-3.2 where tests are being run. So search the + # statedump files according to older formats only. + mv /tmp/glusterdump.$BRICK_PID $dir; + if [ $? -ne 0 ]; then + ret_value=1; + fi + fi done for j in $(seq 1 $num_clients) @@ -814,7 +829,18 @@ function take_statedump () kill -USR1 $CLIENT_PID; sleep 1; mv /tmp/*.$CLIENT_PID.dump $dir; + if [ $? -ne 0 ]; then + # probably its on release-3.2 where tests are being run. So search the + # statedump files according to older formats only. + mv /tmp/glusterdump.$CLIENT_PID $dir; + if [ $? -ne 0 ]; then + ret_value=1; + fi + fi done + + set -e; + return $ret_value; } function main() @@ -827,8 +853,14 @@ function main() start_glusterd; start_glusterfs; take_statedump $LOGDIR/old_dump/; + if [ $? -ne 0 ]; then + echo "taking pre run statedump failed." >> /export/tests_failed; + fi run_tests; take_statedump $LOGDIR/new_dump/; + if [ $? -ne 0 ]; then + echo "taking post run statedump failed." 
>> /export/tests_failed; + fi trap - INT TERM EXIT post_run; } diff --git a/sanity/system_light/config b/sanity/system_light/config index 3acde78..0d95bcf 100755 --- a/sanity/system_light/config +++ b/sanity/system_light/config @@ -13,8 +13,9 @@ export LOG_FILE="$TMP/time$$.log" export TEST_DIR=$PWD export DECISION="n" export TYPE="other" +export TEST="all" -while getopts 'w:t:l:D' option +while getopts 'w:t:l:D:m' option do case $option in w) @@ -26,9 +27,12 @@ do D) DECISION="$OPTARG" ;; - t) + m) TYPE="$OPTARG" ;; + t) + TEST="$OPTARG" + ;; esac done diff --git a/sanity/system_light/config~ b/sanity/system_light/config~ deleted file mode 100755 index 99b6779..0000000 --- a/sanity/system_light/config~ +++ /dev/null @@ -1,139 +0,0 @@ - -#Test path i.e. the path where the configuration file resides.Execution must be done from the saopt location where it resides -export CONF_FILE=/opt/qa/tools/system_light/config - -#Directory from which the script is invoked -export INVOKEDIR=$PWD - -export ERR=0 - -#The log file where the error optassages and the tiopt duration should be logged -export TMP="/mnt/logs" -export LOG_FILE="$TMP/time$$.log" -export TEST_DIR=$PWD -export DECISION="n" -export TYPE="other" - -while getopts 'w:l:D:t' option -do - case $option in - w) - TEST_DIR="$OPTARG" - ;; - l) - LOG_FILE="$OPTARG" - ;; - D) - DECISION="$OPTARG" - ;; - t) - TYPE="$OPTARG" - ;; - esac -done - -# Do you want thetest to be terminated on first falure? Press y if you want to -if [ "$DECISION" = "y" ]; then - set -e; -fi - -#File where failed tests are saved" -export LOG_DIR=$(dirname $LOG_FILE) -#mkdir $LOG_DIR/tests_failed -export TEST_FAIL=$LOG_DIR/tests_failed - -#GlusterFS mount point -export GF_MP=$TEST_DIR -export THIS_TEST_DIR=$TEST_DIR/run$$/ -mkdir $THIS_TEST_DIR - -#The path which contains the binaries of arequal,postmark,dbench,fileop etc if they are not installed in the system -export PATH=$PATH:/opt/qa/tools -export BIN_PATH=/opt/qa/tools - -#Arequal related variables -export ARE_SRC_DIR=/opt/qa/tools/ #The Directory in which the file or the directory being tested is present -export FILEORDIR=system_light #THe file or the directory to be tested -export ARE_SRC=$ARE_SRC_DIR$FILEORDIR -export ARE_DST=$THIS_TEST_DIR$FILEORDIR #The destination directory where the test file should be copied - -#Bonnie Related variables -export USER_NAME=`whoami` -export WD=$THIS_TEST_DIR - -#dbench related variables -export TIME=300 -export DBENCH_CLNTS=10 - -#dd related variables -export OF=dd_test -export BS_SIZE=100M -export DD_CNT=10 - -#Read from the large file -export LARGE_FILE_SOURCE=$THIS_TEST_DIR/$OF -export LARGE_FILE_DEST=/dev/null - -#fileop related variables -#export FILEOP_SIZE=100 -export FILEOP_CNT=30 - -#fsx related variables -export FSX_FILE_ORIG=/opt/qa/tools/read -export FSX_FILE=$(basename $FSX_FILE_ORIG) -export NUM_OPS=100 #Number of operations to be perforoptd default is infinity - -#ffsb related variables -export FFSB_FILE=$THIS_TEST_DIR/profile_everything - -#glusterfs build -export GFS_VERSION=3.0.0pre2 -export GLUSTERFS_TAR_FILE=/opt/qa/tools/glusterfs-$GFS_VERSION.tar.gz -export GLUSTERFS_DIR=glusterfs.git - -#ioblazer related variables -export BLAZER_DIR=$THIS_TEST_DIR; - -#iozone related variables -export FILE_SIZE=4g -export RECORD_SIZE=22k - -# kernel related variables -#export SCRIPTS_PATH="/opt/qa/tools/system_light/scripts" -export VERSION=2.6.31.1 -export KERNEL_PATH=/opt/qa/tools/linux-$VERSION.tar.bz2 - -#ltp related variables -export 
LTP_DIR=/opt/qa/tools/ltp-full-20091031/testcases/kernel/fs/ - -#locks related variables -export LOCK_BIN=/opt/qa/tools/locks/locktests -export CON_PROC=22 -export LOCK_TEST_FILE=$THIS_TEST_DIR/locks_test - -#lmbench related variables -export LM_DIR=lmbench-3.0-a9 -export SRC_DIR=$BIN_PATH/lmbench-3.0-a9 - -#openssl related variables -export OPENSSL_TAR_FILE="/opt/qa/tools/openssl-0.9.8c.tar.gz" -export OPENSSL_DIR="openssl-0.9.8c" -export PREFIX='' -export OPENSSLDIR='' - -#posix compliance related variables -export DIR="/opt/qa/tools/posix-testsuite"; - -#postmark related variables -export POST_FILE=/opt/qa/tools/system_light/pm.config - -#Multiple file related variables -export NUM_OF_FILES=100000 - -#syscallbench related variables -export TOOL_DIR=/opt/qa/tools/tools.git -export SYSCALL_BIN=$TOOL_DIR/syscallbench/syscallbench -export SYSCALL_PLOT=$TOOL_DIR/syscallbench/syscallben-plot - -#tiobench related variables -export TIO_BIN=/opt/qa/tools/tiobench-0.3.3/tiotest diff --git a/sanity/system_light/scripts/lmbench_test.sh b/sanity/system_light/legacy/lmbench_test.sh index 30c5f6d..30c5f6d 100755 --- a/sanity/system_light/scripts/lmbench_test.sh +++ b/sanity/system_light/legacy/lmbench_test.sh diff --git a/sanity/system_light/scripts/mmapstress.sh b/sanity/system_light/legacy/mmapstress.sh index bfcb206..bfcb206 100644..100755 --- a/sanity/system_light/scripts/mmapstress.sh +++ b/sanity/system_light/legacy/mmapstress.sh diff --git a/sanity/system_light/run.sh b/sanity/system_light/run.sh index 910f5bc..b8061c8 100755 --- a/sanity/system_light/run.sh +++ b/sanity/system_light/run.sh @@ -6,406 +6,80 @@ _init () set +x set -u; basedir=$(dirname $0); - SCRIPTS_PATH=$basedir/scripts; + name=$(basename $0); + abs=$(cd "$(dirname "$0")"; pwd) + SCRIPTS_PATH=$abs/scripts; + echo $abs; #SCRIPTS_PATH="/opt/qa/tools/system_light/scripts" CNT=0 . $basedir/config; #. 
/opt/qa/tools/system_light/config - - echo " This script runs the tools and scriprts which are used to test the performance.The tests are run on ther glusterFS mountpoint.They are: -1.dd -2.dbench -3.arequal -4.posix_compliance -5.kernel compile -6.fsx -7.ltp tests -8.fileop -9.openssl build -10.postmark -11.ffsb -12.Reading from large file -13.Multiple file creation(100000) -14.glusterfs build -15.syscallbench -16.tiobench -17.locktests -18.ioblazer -19.fsmark"; -} - -run_ffsb () -{ - echo "Executing ffsb" - set +x - cp $BIN_PATH/system_light/profile_everything $THIS_TEST_DIR/profile_everything - sed -i "s[/mnt/test1[$THIS_TEST_DIR[" profile_everything - $SCRIPTS_PATH/ffsb_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing data" - rm -rfv data && echo "Removed" - echo "Removing meta" - rm -rfv meta && echo "Removed" - echo "Removing profile_everything" - rm $FFSB_FILE && echo "Removed" - else - echo "ffsb failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_ltp () -{ - echo "Executing ltp tests" - set +x - mkdir ltp - cd ltp - $SCRIPTS_PATH/ltp_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing directory" - cd - - rm -rfv ltp && echo "removed" - else - echo "ltp failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_fileop () -{ - echo "Executing fileop" - set +x - $SCRIPTS_PATH/fileop_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "fileop failed" - echo $CNT - fi -} - -run_kernel_compile () -{ - echo "Kernel compiling" #Untars the given kernel file and compiles it - set +x - $SCRIPTS_PATH/kernel.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing linux-$VERSION.tar.bz2 and linux-$VERSION" - rm -r linux-$VERSION* && echo "removed" - else - echo "kernel compile failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_bonnie () -{ - echo "Executing bonnie++" - set +x - $SCRIPTS_PATH/bonnie_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "bonnie failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_dd () -{ - echo "Executing dd" - set +x - $SCRIPTS_PATH/dd_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "dd failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_read_large () -{ - echo "Reading from large file" - set +x - $SCRIPTS_PATH/read_large.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing $PWD/$OF" - rm $PWD/$OF && echo "Removed" - else - echo "Large file reading failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_dbench () -{ - echo "Executing dbench" - set +x - $SCRIPTS_PATH/dbench_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing clients" - rm -r clients && echo "Removed" - else - echo "dbench failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_glusterfs_build () -{ - echo "glusterfs build" - set +x; - $SCRIPTS_PATH/glusterfs_build.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing glusterfs directory" - rm -r $GLUSTERFS_DIR && echo "Removed" - else - echo "glusterfs build failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_posix_compliance () -{ - echo "Checking for POSIX compliance" - set +x - $SCRIPTS_PATH/posix_compliance.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing posix created directories and files" - rm -r fstest* && echo "Removed" - else - echo "posix failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_openssl_build () -{ - echo "Building opnssl" - set +x - 
$SCRIPTS_PATH/open.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing $OPENSSL_DIR" - rm -r openssl* && echo "Removed" - else - echo "openssl failed" | tee -a $TEST_FAIL - echo $CNT - fi } -run_postmark () +function run_tests () { - echo "Running postmark" - set +x - $SCRIPTS_PATH/postmark.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "postmark failed" | tee -a $TEST_FAIL - echo $CNT + local tool=; + if [ $# -eq 1 ]; then + tool=$1; fi -} -run_multiple_files () -{ - echo "Multiple files creation(100000),listing,removal" - set +x - $SCRIPTS_PATH/multiple_files.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "multiple files failed" | tee -a $TEST_FAIL - echo $CNT - fi -} + export global_test=; -run_iozone () -{ - echo "Executing iozone" - set +x - $SCRIPTS_PATH/iozone_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "iozone failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_fsx () -{ - echo "Executing fsx" - set +x - $SCRIPTS_PATH/fsx_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing $FSX_FILE,$FSX_FILE.fsxgood and $FSX_FILE.fsxlog" - rm $FSX_FILE* && echo "Removed" - else - echo "fsx failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_arequal () -{ - echo "executing arequal" - set +x - $SCRIPTS_PATH/arequal_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing $ARE_DST" - rm -r $ARE_DST && echo "Removed" - else - echo "arequal failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_syscallbench () -{ - echo "Executing syscallbench" - set +x - $SCRIPTS_PATH/syscallbench.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "syscallbench failed" | tee -a $TEST_FAIL - echo $CNT - fi -} + if [ $tool ]; then + global_test=$tool; + export TOOLDIR=$SCRIPTS_PATH/$global_test; -run_tiobench () -{ - echo "Executing tiobench" - set +x - $SCRIPTS_PATH/tiobench.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "tiobench failed" | tee -a $TEST_FAIL - echo $CNT - fi -} + if [ -f $SCRIPTS_PATH/$tool/$tool.sh ]; then -run_locktests () -{ - echo "Executing locktests" - set +x - locks_dirname=$(dirname $LOCK_BIN) - cp $locks_dirname/test $LOCK_TEST_FILE - $SCRIPTS_PATH/locks.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - rm $LOCK_TEST_FILE - else - echo "locktests failed" | tee -a $TEST_FAIL - echo $CNT - rm $LOCK_TEST_FILE + echo "executing $tool" && sleep 2; + set +x; + $SCRIPTS_PATH/$tool/$tool.sh; + if [ "${?}" -eq 0 ]; then + CNT=$((CNT+1)) + echo $CNT + else + echo "$tool failed" | tee -a $TEST_FAIL + echo $CNT + fi + return 0; + else + echo "tool $tool is not there in the script directory $SCRIPTS_PATH. 
Exiting"; + return 22; + fi fi -} -run_blazer () -{ - echo "Executing ioblazer"; - set +x; - $SCRIPTS_PATH/blazer.sh; - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)); - echo $CNT; - else - echo "blazer failed | tee -a $TEST_FAIL"; - echo $CNT; - fi + for i in $(ls $SCRIPTS_PATH | sort -n) + do + if [ -f $SCRIPTS_PATH/$i/$i.sh ]; then + run_tests $i; + sleep 1; + fi + done } -run_rpc_coverage () +main () { - echo "Executing rpc coverage tests"; - set +x; - $SCRIPTS_PATH/rpc-fops.sh; - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)); - echo $CNT; - else - echo "rpc-coverage failed | tee -a $TEST_FAIL"; - echo $CNT; - fi -} + echo "Tests available:" + ls $SCRIPTS_PATH | sort -n && sleep 1; -run_fsmark () -{ - echo "Executing fsmark" - set +x - $SCRIPTS_PATH/fs_mark.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "fsmark failed" - echo $CNT - fi -} + old_PWD=$PWD; -main () -{ - echo " Changing to the specified mountpoint"; + echo "===========================TESTS RUNNING==========================="; + echo "Changing to the specified mountpoint"; cd $THIS_TEST_DIR; pwd; sleep 1; - run_rpc_coverage; - run_posix_compliance; - run_ffsb; - run_ltp; - run_fileop; - run_kernel_compile; - run_dd; - run_read_large; - run_dbench; - run_bonnie; - run_iozone; - run_glusterfs_build; - run_openssl_build; - run_postmark; - run_multiple_files; - run_fsx; - run_arequal; - run_syscallbench; - run_tiobench; - run_fsmark; - if [ $TYPE != "nfs" ]; then - run_locktests; + if [ $TEST == "all" ]; then + run_tests; + else + run_tests $TEST; + if [ $? -ne 0 ]; then + cd $old_PWD; + rmdir $THIS_TEST_DIR; + exit 22; + fi fi - #run_blazer; echo "Total $CNT tests were successful" | tee -a $TEST_FAIL diff --git a/sanity/system_light/run.sh~ b/sanity/system_light/run.sh~ deleted file mode 100755 index e7edcb1..0000000 --- a/sanity/system_light/run.sh~ +++ /dev/null @@ -1,408 +0,0 @@ -#!/bin/sh - -_init () -{ - ulimit -c unlimited - set +x - set -u; - basedir=$(dirname $0); - SCRIPTS_PATH=$basedir/scripts; - #SCRIPTS_PATH="/opt/qa/tools/system_light/scripts" - CNT=0 - . $basedir/config; - #. 
/opt/qa/tools/system_light/config - - echo " This script runs the tools and scriprts which are used to test the performance.The tests are run on ther glusterFS mountpoint.They are: -1.dd -2.dbench -3.arequal -4.posix_compliance -5.kernel compile -6.fsx -7.ltp tests -8.fileop -9.openssl build -10.postmark -11.ffsb -12.Reading from large file -13.Multiple file creation(100000) -14.glusterfs build -15.syscallbench -16.tiobench -17.locktests -18.ioblazer"; -} - -run_ffsb () -{ - echo "Executing ffsb" - set +x - cp $BIN_PATH/system_light/profile_everything $THIS_TEST_DIR/profile_everything - sed -i "s[/mnt/test1[$THIS_TEST_DIR[" profile_everything - $SCRIPTS_PATH/ffsb_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing data" - rm -rfv data && echo "Removed" - echo "Removing meta" - rm -rfv meta && echo "Removed" - echo "Removing profile_everything" - rm $FFSB_FILE && echo "Removed" - else - echo "ffsb failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_ltp () -{ - echo "Executing ltp tests" - set +x - mkdir ltp - cd ltp - $SCRIPTS_PATH/ltp_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing directory" - cd - - rm -rfv ltp && echo "removed" - else - echo "ltp failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_fileop () -{ - echo "Executing fileop" - set +x - $SCRIPTS_PATH/fileop_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "fileop failed" - echo $CNT - fi -} - -run_kernel_compile () -{ - echo "Kernel compiling" #Untars the given kernel file and compiles it - set +x - $SCRIPTS_PATH/kernel.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing linux-$VERSION.tar.bz2 and linux-$VERSION" - rm -r linux-$VERSION* && echo "removed" - else - echo "kernel compile failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -# echo "Executing bonnie++" -# set +x -# $SCRIPTS_PATH/bonnie_test.sh -# if [ "${?}" -eq 0 ]; then -# CNT=$((CNT+1)) -# echo $CNT -# else -# echo "bonnie failed" | tee -a $TEST_FAIL -# echo $CNT -# fi - -run_dd () -{ - echo "Executing dd" - set +x - $SCRIPTS_PATH/dd_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "dd failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_read_large () -{ - echo "Reading from large file" - set +x - $SCRIPTS_PATH/read_large.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing $PWD/$OF" - rm $PWD/$OF && echo "Removed" - else - echo "Large file reading failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_dbench () -{ - echo "Executing dbench" - set +x - $SCRIPTS_PATH/dbench_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing clients" - rm -r clients && echo "Removed" - else - echo "dbench failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_glusterfs_build () -{ - echo "glusterfs build" - set +x; - $SCRIPTS_PATH/glusterfs_build.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing glusterfs directory" - rm -r $GLUSTERFS_DIR && echo "Removed" - else - echo "glusterfs build failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_posix_compliance () -{ - echo "Checking for POSIX compliance" - set +x - $SCRIPTS_PATH/posix_compliance.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing posix created directories and files" - rm -r fstest* && echo "Removed" - else - echo "posix failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_openssl_build () -{ - echo "Building opnssl" - set +x - $SCRIPTS_PATH/open.sh - if [ 
"${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing $OPENSSL_DIR" - rm -r openssl* && echo "Removed" - else - echo "openssl failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_postmark () -{ - echo "Running postmark" - set +x - $SCRIPTS_PATH/postmark.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "postmark failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_multiple_files () -{ - echo "Multiple files creation(100000),listing,removal" - set +x - $SCRIPTS_PATH/multiple_files.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "multiple files failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -# echo "Executing iozone" -# set +x -# $SCRIPTS_PATH/iozone_test.sh -# if [ "${?}" -eq 0 ]; then -# CNT=$((CNT+1)) -# echo $CNT -# else -# echo "iozone failed" | tee -a $TEST_FAIL -# echo $CNT -# fi - -run_fsx () -{ - echo "Executing fsx" - set +x - $SCRIPTS_PATH/fsx_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing $FSX_FILE,$FSX_FILE.fsxgood and $FSX_FILE.fsxlog" - rm $FSX_FILE* && echo "Removed" - else - echo "fsx failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_arequal () -{ - echo "executing arequal" - set +x - $SCRIPTS_PATH/arequal_test.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing $ARE_DST" - rm -r $ARE_DST && echo "Removed" - else - echo "arequal failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_syscallbench () -{ - echo "Executing syscallbench" - set +x - $SCRIPTS_PATH/syscallbench.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "syscallbench failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_tiobench () -{ - echo "Executing tiobench" - set +x - $SCRIPTS_PATH/tiobench.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - else - echo "tiobench failed" | tee -a $TEST_FAIL - echo $CNT - fi -} - -run_locktests () -{ - echo "Executing locktests" - set +x - locks_dirname=$(dirname $LOCK_BIN) - cp $locks_dirname/test $LOCK_TEST_FILE - $SCRIPTS_PATH/locks.sh - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - rm $LOCK_TEST_FILE - else - echo "locktests failed" | tee -a $TEST_FAIL - echo $CNT - rm $LOCK_TEST_FILE - fi -} - -run_blazer () -{ - echo "Executing ioblazer"; - set +x; - $SCRIPTS_PATH/blazer.sh; - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)); - echo $CNT; - else - echo "blazer failed | tee -a $TEST_FAIL"; - echo $CNT; - fi -} - -run_rpc_coverage () -{ - echo "Executing rpc coverage tests"; - #set +x; - set -x; - $SCRIPTS_PATH/rpc-fops.sh; - if [ "${?}" -eq 0 ]; then - CNT=$((CNT+1)); - echo $CNT; - else - echo "rpc-coverage failed | tee -a $TEST_FAIL"; - echo $CNT; - fi -} - -main () -{ - echo " Changing to the specified mountpoint"; - cd $THIS_TEST_DIR; - pwd; - sleep 1; - - run_rpc_coverage; - # run_posix_compliance; -# run_ffsb; -# run_ltp; -# run_fileop; - run_kernel_compile; - # run_dd; -# run_read_large; -# run_dbench; -# run_glusterfs_build; -# run_openssl_build; -# run_postmark; -# run_multiple_files; -# run_fsx; -# run_arequal; -# run_syscallbench; -# run_tiobench; -# if [ $TYPE != "nfs" ]; then -# run_locktests; -# fi - #run_blazer; - - echo "Total $CNT tests were successful" | tee -a $TEST_FAIL - - if [ "$INVOKEDIR" == "$THIS_TEST_DIR" ]; then - echo "moving to the parent directory" - cd .. 
- echo "Removing $THIS_TEST_DIR" - rmdir $THIS_TEST_DIR - if [ "${?}" -ne 0 ]; then - echo "rmdir failed:Directory not empty" - fi - else - echo "Switching over to the previous working directory" - cd $INVOKEDIR - echo "Removing $THIS_TEST_DIR" - rmdir $THIS_TEST_DIR - if [ "${?}" -ne 0 ]; then - echo "rmdir failed:Directory not empty" - fi - fi -} - -_init "$@" && main "$@"
\ No newline at end of file diff --git a/sanity/system_light/run_nfs.sh b/sanity/system_light/run_nfs.sh deleted file mode 100755 index 94d8262..0000000 --- a/sanity/system_light/run_nfs.sh +++ /dev/null @@ -1,101 +0,0 @@ -#!/bin/sh - -ulimit -c unlimited -set +x -SCRIPTS_PATH="/opt/qa/tools/system_light/scripts" -CNT=0 -. /opt/qa/tools/system_light/config -echo " Changing to the specified mountpoint" -cd $THIS_TEST_DIR -pwd - -echo " This script runs the tools and scriprts which are used to test the performance.The tests are run on ther glusterFS mountpoint.They are: -1.dd -2.dbench -3.arequal -4.posix_compliance -5.kernel compile -6.fsx -7.ltp tests -8.fileop -9.bonnie -10.iozone -11.openssl build -12.postmark -13.ffsb -14.Reading from large file -15.Multiple file creation(100000) -16.glusterfs build"; - -sleep 1 - -echo "Executing ffsb" - set +x - cp $BIN_PATH/system_light/profile_everything $THIS_TEST_DIR/profile_everything - sed -i "s[/mnt/test1[$THIS_TEST_DIR[" profile_everything - $SCRIPTS_PATH/ffsb_test.sh -if [ $? -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing data" - rm -rfv data && echo "Removed" - echo "Removing meta" - rm -rfv meta && echo "Removed" - echo "Removing profile_everything" - rm $FFSB_FILE && echo "Removed" -else - echo "ffsb failed" - echo $CNT -fi - -echo "Executing ltp tests" - set +x - mkdir ltp - cd ltp - $SCRIPTS_PATH/ltp_test.sh -if [ $? -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing directory" - cd - - rm -rfv ltp && echo "removed" -else - echo "ltp failed" - echo $CNT -fi - -echo "Executing fileop" - set +x - $SCRIPTS_PATH/fileop_test.sh -if [ $? -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT -else - echo "fileop failed" - echo $CNT -fi - -echo "Kernel compiling" #Untars the given kernel file and compiles it - set +x - $SCRIPTS_PATH/kernel.sh -if [ $? -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT - echo "Removing linux-$VERSION.tar.bz2 and linux-$VERSION" - rm -r linux-$VERSION* && echo "removed" -else - echo "kernel compile failed" - echo $CNT -fi - -echo "Executing bonnie++" - set +x - $SCRIPTS_PATH/bonnie_test.sh -if [ $? -eq 0 ]; then - CNT=$((CNT+1)) - echo $CNT -else - echo "bonnie failed" - echo $CNT -fi - diff --git a/sanity/system_light/scripts/#kernel.sh# b/sanity/system_light/scripts/#kernel.sh# deleted file mode 100755 index 40268a9..0000000 --- a/sanity/system_light/scripts/#kernel.sh# +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -#This script 1st searches in the pwd for the kernel tar file. If its not there then based on the value of the vriable KERNEL_PATH it either searches from the path given or searches in http://www.kernel.org - -function main() -{ - if [ -e "linux-$VERSION.tar.bz2" ] - then - echo "start:`date +%T`" - time $SCRIPTS_PATH/kernel_compile.sh linux-$VERSION.tar.bz2 2>>$LOG_FILE 1>>$LOG_FILE - if [ $? -ne 0 ]; then - - f err=$? - echo "end:`date +%T`" - return 11; - else - echo "end:`date +%T`" - return 0; - fi - elif [ -z "$KERNEL_PATH" ] - then - time $SCRIPTS_PATH/kernel_compile.sh 2>>$LOG_FILE 1>>$LOG_FILE - if [ $? -ne 0 ]; then - err=$? - echo "end:`date +%T`" - return 11; - else - echo "end:`date +%T`" - return 0; - fi - else - time $SCRIPTS_PATH/kernel_compile.sh $KERNEL_PATH 2>>$LOG_FILE 1>>$LOG_FILE - if [ $? -ne 0 ]; then - err=$? - echo "end:`date +%T`" - return 11; - else - echo "end:`date +%T`" - return 0; - fi - fi; -} - -main "$@";
\ No newline at end of file diff --git a/sanity/system_light/scripts/arequal/arequal.sh b/sanity/system_light/scripts/arequal/arequal.sh new file mode 100755 index 0000000..1dafdbd --- /dev/null +++ b/sanity/system_light/scripts/arequal/arequal.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +function main () +{ + $TOOLDIR/arequal_run.sh; + + if [ $? -eq 0 ]; then + rm -r $ARE_DST && echo "removed"; + return 0; + else + return 1; + fi +} + +main "$@" diff --git a/sanity/system_light/scripts/arequal_test.sh b/sanity/system_light/scripts/arequal/arequal_run.sh index 3b66680..ebf40b7 100755 --- a/sanity/system_light/scripts/arequal_test.sh +++ b/sanity/system_light/scripts/arequal/arequal_run.sh @@ -4,19 +4,19 @@ function main () { stat $ARE_SRC | grep directory > /dev/null if [ $? -eq 0 ] ; then - ARE_SRC=$ARE_SRC/ + ARE_SRC=$ARE_SRC/ fi - - echo "start:`date +%T`" - time arequal-run.sh $ARE_SRC $ARE_DST 2>>$LOG_FILE 1>>$LOG_FILE + + echo "start:`date +%T`" + time arequal-run.sh $ARE_SRC $ARE_DST 2>>$LOG_FILE 1>>$LOG_FILE #copies the contents of $4 directory to $5 and calculates the checksum of both src and dst directories to check whether the transfer was successful. We need to redirect the standard output also to the logfile to see the output of arequal. if [ $? -ne 0 ]; then - echo "end:`date +%T`" - return 11; + echo "end:`date +%T`" + return 11; else - echo "end:`date +%T`" + echo "end:`date +%T`" fi } diff --git a/sanity/system_light/scripts/blazer.sh b/sanity/system_light/scripts/blazer.sh deleted file mode 100755 index 3e59cb0..0000000 --- a/sanity/system_light/scripts/blazer.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - - -#ioblazer test which tests the IO functionality, and can generate vm related loads. - -function main() -{ - ioblazer -d $BLAZER_DIR; - - # Since opening a file with O_DIRECT in fuse fails check the exit value for failure. If the test fails for the first time assume that - # the mount point was a fuse mount point and re run the test again with buffered IO enabled. - - if [ $? -ne 0 ]; then - ioblazer -B 1 -d $BLAZER_DIR - if [ $? -ne 0 ]; then - return 11; - else - return 0; - fi - else - return 0; - fi -} - -main "$@";
\ No newline at end of file diff --git a/sanity/system_light/scripts/bonnie_test.sh b/sanity/system_light/scripts/bonnie/bonnie.sh index 70bc2bd..70bc2bd 100755 --- a/sanity/system_light/scripts/bonnie_test.sh +++ b/sanity/system_light/scripts/bonnie/bonnie.sh diff --git a/sanity/system_light/scripts/compile_kernel/compile_kernel.sh b/sanity/system_light/scripts/compile_kernel/compile_kernel.sh new file mode 100755 index 0000000..ed153f2 --- /dev/null +++ b/sanity/system_light/scripts/compile_kernel/compile_kernel.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +function main () +{ + $TOOLDIR/kernel.sh; + + if [ $? -eq 0 ]; then + rm -r linux-$VERSION* && echo "removed kernel"; + return 0; + else + return 1; + fi +} + +main "$@"
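The compile_kernel wrapper above hands off to kernel.sh, which first looks for linux-$VERSION.tar.bz2 in the working directory and otherwise falls back to the tarball named by $KERNEL_PATH (or lets kernel_compile.sh fetch the sources itself when $KERNEL_PATH is empty, as the script's header comment describes). Both variables come from the config file; illustrative values, not the real config settings, would look like:

    export VERSION=2.6.31
    export KERNEL_PATH=/opt/qa/tools/linux-2.6.31.tar.bz2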
\ No newline at end of file diff --git a/sanity/system_light/scripts/kernel.sh b/sanity/system_light/scripts/compile_kernel/kernel.sh index 1af548c..c6c9542 100755 --- a/sanity/system_light/scripts/kernel.sh +++ b/sanity/system_light/scripts/compile_kernel/kernel.sh @@ -7,35 +7,35 @@ function main() SCRIPTS_DIR=$(dirname $0); if [ -e "linux-$VERSION.tar.bz2" ] then - echo "start:`date +%T`" - time $SCRIPTS_DIR/kernel_compile.sh linux-$VERSION.tar.bz2 2>>$LOG_FILE 1>>$LOG_FILE + echo "start:`date +%T`" + time $SCRIPTS_DIR/kernel_compile.sh linux-$VERSION.tar.bz2 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -ne 0 ]; then err=$? - echo "end:`date +%T`" + echo "end:`date +%T`" return 11; else - echo "end:`date +%T`" + echo "end:`date +%T`" return 0; fi elif [ -z "$KERNEL_PATH" ] then - time $SCRIPTS_DIR/kernel_compile.sh 2>>$LOG_FILE 1>>$LOG_FILE + time $SCRIPTS_DIR/kernel_compile.sh 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -ne 0 ]; then err=$? - echo "end:`date +%T`" + echo "end:`date +%T`" return 11; else - echo "end:`date +%T`" + echo "end:`date +%T`" return 0; fi else - time $SCRIPTS_DIR/kernel_compile.sh $KERNEL_PATH 2>>$LOG_FILE 1>>$LOG_FILE + time $SCRIPTS_DIR/kernel_compile.sh $KERNEL_PATH 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -ne 0 ]; then err=$? - echo "end:`date +%T`" + echo "end:`date +%T`" return 11; else - echo "end:`date +%T`" + echo "end:`date +%T`" return 0; fi fi; diff --git a/sanity/system_light/scripts/kernel_compile.sh b/sanity/system_light/scripts/compile_kernel/kernel_compile.sh index 646d639..646d639 100755 --- a/sanity/system_light/scripts/kernel_compile.sh +++ b/sanity/system_light/scripts/compile_kernel/kernel_compile.sh diff --git a/sanity/system_light/scripts/dbench/dbench.sh b/sanity/system_light/scripts/dbench/dbench.sh new file mode 100755 index 0000000..2d2633d --- /dev/null +++ b/sanity/system_light/scripts/dbench/dbench.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +function main() +{ + $TOOLDIR/dbench_run.sh; + if [ $? -eq 0 ]; then + rm -r clients && echo "removed clients"; + return 0; + else + return 1; + fi +} + +main "$@";
\ No newline at end of file diff --git a/sanity/system_light/scripts/dbench_test.sh b/sanity/system_light/scripts/dbench/dbench_run.sh index fee116f..6887512 100755 --- a/sanity/system_light/scripts/dbench_test.sh +++ b/sanity/system_light/scripts/dbench/dbench_run.sh @@ -2,19 +2,19 @@ function main() { -#runs $3 multiple clients on mount point and tests the performance and -t option ($2) tells the time for which it should be run +#runs $3 multiple clients on mount point and tests the performance and -t option ($2) tells the time for which it should be run echo "start:`date +%T`" time dbench -t $TIME -s -S $DBENCH_CLNTS 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -ne 0 ]; then - echo "end:`date +%T`" + echo "end:`date +%T`" return 11; else - echo "end:`date +%T`" + echo "end:`date +%T`" return 0; fi - + } main "$@";
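dbench_run.sh above takes both the run time and the client count from the config file. With illustrative values of TIME=300 and DBENCH_CLNTS=10 (not the real config settings), the timed command expands to:

    time dbench -t 300 -s -S 10 2>>$LOG_FILE 1>>$LOG_FILE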
\ No newline at end of file diff --git a/sanity/system_light/scripts/dd/dd.sh b/sanity/system_light/scripts/dd/dd.sh new file mode 100755 index 0000000..7a1a23e --- /dev/null +++ b/sanity/system_light/scripts/dd/dd.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +function main () +{ + $TOOLDIR/dd_run.sh; + if [ $? -eq 0 ]; then + rm -f $PWD/$OF && echo "dd file removed"; + return 0; + else + return 1; + fi +} + +main "$@"
\ No newline at end of file diff --git a/sanity/system_light/scripts/dd_test.sh b/sanity/system_light/scripts/dd/dd_run.sh index c4df53c..f2857cc 100755 --- a/sanity/system_light/scripts/dd_test.sh +++ b/sanity/system_light/scripts/dd/dd_run.sh @@ -2,16 +2,16 @@ function main () { - echo "start:`date +%T`" + echo "start:`date +%T`" time dd if=/dev/zero of=$PWD/$OF bs=$BS_SIZE count=$DD_CNT 2>>$LOG_FILE #copies specified amount of data from the input file to the output file if [ $? -ne 0 ]; then - echo "end:`date +%T`" - return 11; + echo "end:`date +%T`" + return 11; else - echo "end:`date +%T`" - return 0; + echo "end:`date +%T`" + return 0; fi } diff --git a/sanity/system_light/scripts/ffsb/ffsb.sh b/sanity/system_light/scripts/ffsb/ffsb.sh new file mode 100755 index 0000000..7e654a0 --- /dev/null +++ b/sanity/system_light/scripts/ffsb/ffsb.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +function main () +{ + cp $BIN_PATH/system_light/profile_everything $THIS_TEST_DIR/profile_everything + sed -i "s[/mnt/test1[$THIS_TEST_DIR[" profile_everything + $TOOLDIR/ffsb_run.sh + if [ "${?}" -eq 0 ]; then + echo "Removing data" + rm -rfv data && echo "Removed" + echo "Removing meta" + rm -rfv meta && echo "Removed" + echo "Removing profile_everything" + rm $FFSB_FILE && echo "Removed" + return 0; + else + return 1; + fi +} + +main "$@"
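The ffsb wrapper above copies the bundled profile_everything into the test directory and rewrites its hard-coded /mnt/test1 path so that ffsb operates on the current mount. With an illustrative test directory of /mnt/glusterfs/run1 (hypothetical), the sed amounts to:

    sed -i "s[/mnt/test1[/mnt/glusterfs/run1[" profile_everything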
\ No newline at end of file diff --git a/sanity/system_light/scripts/ffsb/ffsb_run.sh b/sanity/system_light/scripts/ffsb/ffsb_run.sh new file mode 100755 index 0000000..918b95e --- /dev/null +++ b/sanity/system_light/scripts/ffsb/ffsb_run.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +function main () +{ + echo "start:`date +%T`" + time ffsb $FFSB_FILE 2>>$LOG_FILE 1>>$LOG_FILE + + + if [ $? -ne 0 ]; then + echo "end:`date +%T`" >>$LOG_FILE + return 11; + else + echo "end:`date +%T`" >>$LOG_FILE + return 0; + fi + +} + +main "$@";
\ No newline at end of file diff --git a/sanity/system_light/scripts/ffsb_test.sh b/sanity/system_light/scripts/ffsb_test.sh deleted file mode 100755 index 6a9e8c2..0000000 --- a/sanity/system_light/scripts/ffsb_test.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -function main () -{ - echo "start:`date +%T`" - time ffsb $FFSB_FILE 2>>$LOG_FILE 1>>$LOG_FILE - - - if [ $? -ne 0 ]; then - echo "end:`date +%T`" >>$LOG_FILE - return 11; - else - echo "end:`date +%T`" >>$LOG_FILE - return 0; - fi - -} - -main "$@";
\ No newline at end of file diff --git a/sanity/system_light/scripts/fileop_test.sh b/sanity/system_light/scripts/fileop/fileop.sh index d99992c..18e07e6 100755 --- a/sanity/system_light/scripts/fileop_test.sh +++ b/sanity/system_light/scripts/fileop/fileop.sh @@ -8,15 +8,15 @@ function main() #in this example it creates 2 directories.In each directory 2 subdirectories are created and in each subdirectory 2 files are created. - + if [ $? -ne 0 ]; then - echo "end:`date +%T`" - return 11; + echo "end:`date +%T`" + return 11; else - echo "end:`date +%T`" - return 0; + echo "end:`date +%T`" + return 0; fi - + } main "$@";
\ No newline at end of file diff --git a/sanity/system_light/scripts/fs_mark.sh b/sanity/system_light/scripts/fs_mark/fs_mark.sh index 53404c5..53404c5 100755 --- a/sanity/system_light/scripts/fs_mark.sh +++ b/sanity/system_light/scripts/fs_mark/fs_mark.sh diff --git a/sanity/system_light/scripts/fsx/fsx.sh b/sanity/system_light/scripts/fsx/fsx.sh new file mode 100755 index 0000000..d76b5c4 --- /dev/null +++ b/sanity/system_light/scripts/fsx/fsx.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +function main () +{ + $TOOLDIR/fsx_run.sh; + if [ $? -eq 0 ]; then + rm $FSX_FILE* && echo "Removed fsx file" + return 0; + else + return 1; + fi +} + +main "$@";
\ No newline at end of file diff --git a/sanity/system_light/scripts/fsx_test.sh b/sanity/system_light/scripts/fsx/fsx_run.sh index 8cf5fe8..4807001 100755 --- a/sanity/system_light/scripts/fsx_test.sh +++ b/sanity/system_light/scripts/fsx/fsx_run.sh @@ -6,14 +6,14 @@ function main () { cp $FSX_FILE_ORIG $FSX_FILE - echo "start:`date +%T`" + echo "start:`date +%T`" time fsx -R -W -N $NUM_OPS $FSX_FILE 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -ne 0 ]; then - echo "end:`date +%T`" + + if [ $? -ne 0 ]; then + echo "end:`date +%T`" return 11; else - echo "end:`date +%T`" + echo "end:`date +%T`" return 0; fi } diff --git a/sanity/system_light/scripts/glusterfs_build.sh b/sanity/system_light/scripts/glusterfs_build/build_glusterfs.sh index d12e191..a07d0ad 100755 --- a/sanity/system_light/scripts/glusterfs_build.sh +++ b/sanity/system_light/scripts/glusterfs_build/build_glusterfs.sh @@ -6,7 +6,7 @@ function main () { echo "cloning from the git:`date +%T`" >>$LOG_FILE - time git clone git://git.gluster.com/glusterfs.git glusterfs.git 2>>$LOG_FILE 1>>$LOG_FILE + time git clone git://github.com/gluster/glusterfs.git glusterfs.git 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -ne 0 ]; then echo "Cannot clone the git repository" tar -xvf $GLUSTERFS_TAR_FILE @@ -16,7 +16,7 @@ function main () cd $GLUSTERFS_DIR echo "running autogen.sh:`date +%T`" - time ./autogen.sh 2>>$LOG_FILE 1>>$LOG_FILE + time ./autogen.sh 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -ne 0 ]; then echo "autogen failed:`date +%T`"; @@ -24,22 +24,22 @@ function main () fi echo "running configure:`date +%T`" - time ./configure 2>>$LOG_FILE 1>>$LOG_FILE + time ./configure 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -ne 0 ]; then echo "configure failed:`date +%T`"; return 11; fi - + echo "running make:`date +%T`" - time make -j 32 2>>$LOG_FILE 1>>$LOG_FILE + time make -j 32 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -ne 0 ]; then echo "make failed:`date +%T`"; - return 11; + return 11; else - echo "all successful:`date +%T`" + echo "all successful:`date +%T`" return 0; fi diff --git a/sanity/system_light/scripts/glusterfs_build/glusterfs_build.sh b/sanity/system_light/scripts/glusterfs_build/glusterfs_build.sh new file mode 100755 index 0000000..4a16874 --- /dev/null +++ b/sanity/system_light/scripts/glusterfs_build/glusterfs_build.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +function main () +{ + $TOOLDIR/build_glusterfs.sh; + if [ $? -eq 0 ]; then + rm -r $GLUSTERFS_DIR && echo "glusterfs directory removed"; + return 0; + else + return 1; + fi +} + +main "$@"
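The fsx_run.sh hunk above copies a seed file into place and then drives fsx over it. With illustrative values of NUM_OPS=1000 and a scratch file under the mount (neither taken from the real config), the invocation expands to:

    time fsx -R -W -N 1000 /mnt/glusterfs/run1/fsx_file 2>>$LOG_FILE 1>>$LOG_FILE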
\ No newline at end of file diff --git a/sanity/system_light/scripts/iozone_test.sh b/sanity/system_light/scripts/iozone/iozone.sh index 870f18f..4c99a87 100755 --- a/sanity/system_light/scripts/iozone_test.sh +++ b/sanity/system_light/scripts/iozone/iozone.sh @@ -7,7 +7,8 @@ function main() { echo "start:`date +%T`" #time iozone -i 0 -i 1 -i 2 -i 3 -i 4 -i 5 -i 6 -i 7 -i 8 -i 9 -i 10 -i 11 -i 12 -s $FILE_SIZE -r $RECORD_SIZE 2>&1 1>>$LOG_FILE 1>>/tmp/iozone - time iozone -i 0 -i 1 -i 2 -i 3 -i 4 -i 5 -i 6 -i 7 -i 8 -i 9 -i 10 -i 11 -i 12 -s 1m -r 22k 2>&1 1>>$LOG_FILE 1>>/tmp/iozone + #time iozone -i 0 -i 1 -i 2 -i 3 -i 4 -i 5 -i 6 -i 7 -i 8 -i 9 -i 10 -i 11 -i 12 -s 1m -r 22k 2>&1 1>>$LOG_FILE 1>>/tmp/iozone + time iozone -a 2>&1 1>>$LOG_FILE 1>>/tmp/iozone if [ $? -ne 0 ]; then echo "end:`date +%T`" return 11; diff --git a/sanity/system_light/scripts/kernel.sh~ b/sanity/system_light/scripts/kernel.sh~ deleted file mode 100755 index 863e3b7..0000000 --- a/sanity/system_light/scripts/kernel.sh~ +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -#This script 1st searches in the pwd for the kernel tar file. If its not there then based on the value of the vriable KERNEL_PATH it either searches from the path given or searches in http://www.kernel.org - -function main() -{ - if [ -e "linux-$VERSION.tar.bz2" ] - then - echo "start:`date +%T`" - time $SCRIPTS_PATH/kernel_compile.sh linux-$VERSION.tar.bz2 2>>$LOG_FILE 1>>$LOG_FILE - if [ $? -ne 0 ]; then - err=$? - echo "end:`date +%T`" - return 11; - else - echo "end:`date +%T`" - return 0; - fi - elif [ -z "$KERNEL_PATH" ] - then - time $SCRIPTS_PATH/kernel_compile.sh 2>>$LOG_FILE 1>>$LOG_FILE - if [ $? -ne 0 ]; then - err=$? - echo "end:`date +%T`" - return 11; - else - echo "end:`date +%T`" - return 0; - fi - else - time $SCRIPTS_PATH/kernel_compile.sh $KERNEL_PATH 2>>$LOG_FILE 1>>$LOG_FILE - if [ $? -ne 0 ]; then - err=$? - echo "end:`date +%T`" - return 11; - else - echo "end:`date +%T`" - return 0; - fi - fi; -} - -main "$@";
\ No newline at end of file diff --git a/sanity/system_light/scripts/locks/locks.sh b/sanity/system_light/scripts/locks/locks.sh new file mode 100755 index 0000000..8dce136 --- /dev/null +++ b/sanity/system_light/scripts/locks/locks.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +function main() +{ + locks_dirname=$(dirname $LOCK_BIN); + cp $locks_dirname/test $LOCK_TEST_FILE; + + $TOOLDIR/locks_run.sh; + if [ $? -eq 0 ]; then + rm $LOCK_TEST_FILE; + return 0; + else + return 1; + fi +} + +main "$@";
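The locks wrapper above stages the locktests companion file as $LOCK_TEST_FILE and then calls locks_run.sh (shown in the next hunk), which exercises fcntl byte-range locking first with concurrent processes and then with threads. With CON_PROC=22 from the config, the two timed invocations expand roughly to:

    time $LOCK_BIN -n 22 -f $LOCK_TEST_FILE
    time $LOCK_BIN -n 22 -f $LOCK_TEST_FILE -T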
\ No newline at end of file diff --git a/sanity/system_light/scripts/locks.sh b/sanity/system_light/scripts/locks/locks_run.sh index 2c3db0e..cba6ede 100755 --- a/sanity/system_light/scripts/locks.sh +++ b/sanity/system_light/scripts/locks/locks_run.sh @@ -1,32 +1,32 @@ #!/bin/bash # GOAL : This tests try to stress fcntl locking functions. A master process set a lock on a file region (byte range locking). -# * Some slaves process tries to perform operations on this region, like read, write, set a new lock ... Expected results of this +# * Some slaves process tries to perform operations on this region, like read, write, set a new lock ... Expected results of this # * operations are known. If the operation result is the same as the expected one, the test sucess, else it fails. function main() { echo "testing the locking through concurrent processes:`date +%T`" - time $LOCK_BIN -n $CON_PROC -f $LOCK_TEST_FILE 2>>$LOG_FILE 1>>$LOG_FILE - + time $LOCK_BIN -n $CON_PROC -f $LOCK_TEST_FILE 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -ne 0 ]; then echo "locks by processes failed:`date +%T`" err=11 else - echo "end:`date +%T`" + echo "end:`date +%T`" err=0 fi - + echo "DONE" - + echo "testing the locking through concurrent threads:`date +%T`" - time $LOCK_BIN -n $CON_PROC -f $LOCK_TEST_FILE -T 2>>$LOG_FILE 1>>$LOG_FILE - + time $LOCK_BIN -n $CON_PROC -f $LOCK_TEST_FILE -T 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -ne 0 ]; then echo "locks by threads failed:`date +%T`" return 11; else - echo "end threads:`date +%T`" + echo "end threads:`date +%T`" if [ $err -ne 0 ]; then return 11; else diff --git a/sanity/system_light/scripts/ltp/ltp.sh b/sanity/system_light/scripts/ltp/ltp.sh new file mode 100755 index 0000000..819e689 --- /dev/null +++ b/sanity/system_light/scripts/ltp/ltp.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +function main () +{ + + ### test this part later ##### + #old_PWD=$PWD; + ### test the above part later ##### + + mkdir ltp; + cd ltp; + + $TOOLDIR/ltp_run.sh; + if [ $? -eq 0 ]; then + rm -rfv ltp && echo "removed ltp directories"; + return 0; + else + return 1; + fi + +} + +main "$@"
\ No newline at end of file diff --git a/sanity/system_light/scripts/ltp_test.sh b/sanity/system_light/scripts/ltp/ltp_run.sh index a4242b6..515e421 100755 --- a/sanity/system_light/scripts/ltp_test.sh +++ b/sanity/system_light/scripts/ltp/ltp_run.sh @@ -13,7 +13,7 @@ run_fs_perms_simpletest () #cp $LTP_DIR/fs_perms/fs_perms.sh . cp $LTP_DIR/fs_perms/fs_perms . cp $LTP_DIR/fs_perms/testx . - time $LTP_DIR/fs_perms/fs_perms_simpletest.sh 2>>$LOG_FILE 1>>$LOG_FILE + time $LTP_DIR/fs_perms/fs_perms_simpletest.sh 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -eq 0 ]; then let PASS=$PASS+1 @@ -27,7 +27,7 @@ run_fs_perms_simpletest () run_lftest () { echo "Executing $LTP_DIR/lftest/lftest" - time $LTP_DIR/lftest/lftest 5000 2>>$LOG_FILE 1>>$LOG_FILE + time $LTP_DIR/lftest/lftest 5000 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -eq 0 ]; then let PASS=$PASS+1 @@ -55,8 +55,8 @@ run_stream1 () run_stream2 () { echo "Executing $LTP_DIR/stream/stream02" - time $LTP_DIR/stream/stream02 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE - + time $LTP_DIR/stream/stream02 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -eq 0 ]; then let PASS=$PASS+1 echo $PASS @@ -69,8 +69,8 @@ run_stream2 () run_stream3 () { echo "Executing $LTP_DIR/stream/stream03" - time $LTP_DIR/stream/stream03 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE - + time $LTP_DIR/stream/stream03 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -eq 0 ]; then let PASS=$PASS+1 echo $PASS @@ -83,8 +83,8 @@ run_stream3 () run_stream4 () { echo "Executing $LTP_DIR/stream/stream04" - time $LTP_DIR/stream/stream04 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE - + time $LTP_DIR/stream/stream04 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -eq 0 ]; then let PASS=$PASS+1 echo $PASS @@ -97,7 +97,7 @@ run_stream4 () run_stream5 () { echo "Executing $LTP_DIR/stream/stream05" - time $LTP_DIR/stream/stream05 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE + time $LTP_DIR/stream/stream05 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -eq 0 ]; then let PASS=$PASS+1 @@ -120,7 +120,7 @@ run_stream () run_openfile () { echo "Executing $LTP_DIR/openfile/openfile" - time $LTP_DIR/openfile/openfile -f 100 -t 100 2>>$LOG_FILE 1>>$LOG_FILE + time $LTP_DIR/openfile/openfile -f 100 -t 100 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -eq 0 ]; then let PASS=$PASS+1 @@ -134,7 +134,7 @@ run_openfile () run_inode1 () { echo "Executing $LTP_DIR/inode/inode01" - time $LTP_DIR/inode/inode01 2>>$LOG_FILE 1>>$LOG_FILE + time $LTP_DIR/inode/inode01 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -eq 0 ]; then let PASS=$PASS+1 @@ -148,7 +148,7 @@ run_inode1 () run_inode2 () { echo "Executing $LTP_DIR/inode/inode02" - time $LTP_DIR/inode/inode02 2>>$LOG_FILE 1>>$LOG_FILE + time $LTP_DIR/inode/inode02 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -eq 0 ]; then let PASS=$PASS+1 @@ -168,8 +168,8 @@ run_inode () run_ftest1 () { echo "Executing $LTP_DIR/ftest/ftest01" - time $LTP_DIR/ftest/ftest01 2>>$LOG_FILE 1>>$LOG_FILE - + time $LTP_DIR/ftest/ftest01 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -eq 0 ]; then let PASS=$PASS+1 echo $PASS @@ -182,8 +182,8 @@ run_ftest1 () run_ftest2 () { echo "Executing $LTP_DIR/ftest/ftest02" - time $LTP_DIR/ftest/ftest02 2>>$LOG_FILE 1>>$LOG_FILE - + time $LTP_DIR/ftest/ftest02 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -eq 0 ]; then let PASS=$PASS+1 echo $PASS @@ -196,7 +196,7 @@ run_ftest2 () run_ftest3 () { echo "Executing $LTP_DIR/ftest/ftest03" - time $LTP_DIR/ftest/ftest03 2>>$LOG_FILE 1>>$LOG_FILE + time $LTP_DIR/ftest/ftest03 2>>$LOG_FILE 1>>$LOG_FILE if [ $? 
-eq 0 ]; then let PASS=$PASS+1 @@ -210,7 +210,7 @@ run_ftest3 () run_ftest4 () { echo "Executing $LTP_DIR/ftest/ftest04" - time $LTP_DIR/ftest/ftest04 2>>$LOG_FILE 1>>$LOG_FILE + time $LTP_DIR/ftest/ftest04 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -eq 0 ]; then let PASS=$PASS+1 @@ -224,8 +224,8 @@ run_ftest4 () run_ftest5 () { echo "Executing $LTP_DIR/ftest/ftest05" - time $LTP_DIR/ftest/ftest05 2>>$LOG_FILE 1>>$LOG_FILE - + time $LTP_DIR/ftest/ftest05 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -eq 0 ]; then let PASS=$PASS+1 echo $PASS @@ -238,8 +238,8 @@ run_ftest5 () run_ftest6 () { echo "Executing $LTP_DIR/ftest/ftest06" - time $LTP_DIR/ftest/ftest06 2>>$LOG_FILE 1>>$LOG_FILE - + time $LTP_DIR/ftest/ftest06 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -eq 0 ]; then let PASS=$PASS+1 echo $PASS @@ -252,8 +252,8 @@ run_ftest6 () run_ftest7 () { echo "Executing $LTP_DIR/ftest/ftest07" - time $LTP_DIR/ftest/ftest07 2>>$LOG_FILE 1>>$LOG_FILE - + time $LTP_DIR/ftest/ftest07 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -eq 0 ]; then let PASS=$PASS+1 echo $PASS @@ -266,7 +266,7 @@ run_ftest7 () run_ftest8 () { echo "Executing $LTP_DIR/ftest/ftest08" - time $LTP_DIR/ftest/ftest08 2>>$LOG_FILE 1>>$LOG_FILE + time $LTP_DIR/ftest/ftest08 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -eq 0 ]; then let PASS=$PASS+1 @@ -292,7 +292,7 @@ run_ftest () run_fsstress () { echo "Executing $LTP_DIR/fsstress/fsstress" - time $LTP_DIR/fsstress/fsstress -d $THIS_TEST_DIR -l 22 -n 22 -p 22 2>>$LOG_FILE 1>>$LOG_FILE + time $LTP_DIR/fsstress/fsstress -d $THIS_TEST_DIR -l 22 -n 22 -p 22 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -eq 0 ]; then let PASS=$PASS+1 @@ -306,7 +306,7 @@ run_fsstress () run_fs_inod () { echo "Executing $LTP_DIR/fs_inod/fs_inod" - time $LTP_DIR/fs_inod/fs_inod $THIS_TEST_DIR 22 22 22 2>>$LOG_FILE 1>>$LOG_FILE + time $LTP_DIR/fs_inod/fs_inod $THIS_TEST_DIR 22 22 22 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -eq 0 ]; then let PASS=$PASS+1 diff --git a/sanity/system_light/scripts/ltp_test.sh~ b/sanity/system_light/scripts/ltp_test.sh~ deleted file mode 100755 index 090b663..0000000 --- a/sanity/system_light/scripts/ltp_test.sh~ +++ /dev/null @@ -1,335 +0,0 @@ -#!/bin/bash - -_init () -{ - TOTAL=20; - PASS=0; -} - - -run_fs_perms_simpletest () -{ - echo "Executing $LTP_DIR/fs_perms/fs_perms_simpletest.sh" -#cp $LTP_DIR/fs_perms/fs_perms.sh . - cp $LTP_DIR/fs_perms/fs_perms . - cp $LTP_DIR/fs_perms/testx . - time $LTP_DIR/fs_perms/fs_perms_simpletest.sh 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "fs_perms_simpletest failed:$(date +%T)" - echo $PASS - fi -} - -run_lftest () -{ - echo "Executing $LTP_DIR/lftest/lftest" - time $LTP_DIR/lftest/lftest 5000 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "lftest failed:$(date +%T)" - echo $PASS - fi -} - -run_stream1 () -{ - echo "Executing $LTP_DIR/stream/stream01" - time $LTP_DIR/stream/stream01 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "stream01 failed:$(date +%T)" - echo $PASS - fi -} - -run_stream2 () -{ - echo "Executing $LTP_DIR/stream/stream02" - time $LTP_DIR/stream/stream02 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "stream02 failed:$(date +%T)" - echo $PASS - fi -} - -run_stream2 () -{ - echo "Executing $LTP_DIR/stream/stream03" - time $LTP_DIR/stream/stream03 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? 
-eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "stream03 failed:$(date +%T)" - echo $PASS - fi -} - -run_stream4 () -{ - echo "Executing $LTP_DIR/stream/stream04" - time $LTP_DIR/stream/stream04 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "stream04 failed:$(date +%T)" - echo $PASS - fi -} - -run_stream5 () -{ - echo "Executing $LTP_DIR/stream/stream05" - time $LTP_DIR/stream/stream05 -c 22 -i 22 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "stream05 failed:$(date +%T)" - echo $PASS - fi -} - -run_stream () -{ - run_stream1; - run_stream2; - run_stream3; - run_stream4; - run_stream5; -} - -run_openfile () -{ - echo "Executing $LTP_DIR/openfile/openfile" - time $LTP_DIR/openfile/openfile -f 100 -t 100 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "openfile failed:$(date +%T)" - echo $PASS - fi -} - -run_inode1 () -{ - echo "Executing $LTP_DIR/inode/inode01" - time $LTP_DIR/inode/inode01 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "inode01 failed:$(date +%T)" - echo $PASS - fi -} - -run_inode2 () -{ - echo "Executing $LTP_DIR/inode/inode02" - time $LTP_DIR/inode/inode02 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "inode02 failed:$(date +%T)" - echo $PASS - fi -} - -run_inode () -{ - run_inode1; - run_inode2; -} - -run_ftest1 () -{ - echo "Executing $LTP_DIR/ftest/ftest01" - time $LTP_DIR/ftest/ftest01 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "ftest01 failed:$(date +%T)" - echo $PASS - fi -} - -run_ftest2 () -{ - echo "Executing $LTP_DIR/ftest/ftest02" - time $LTP_DIR/ftest/ftest02 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "ftest02 failed:$(date +%T)" - echo $PASS - fi -} - -run_ftest3 () -{ - echo "Executing $LTP_DIR/ftest/ftest03" - time $LTP_DIR/ftest/ftest03 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "ftest03 failed:$(date +%T)" - echo $PASS - fi -} - -run_ftest4 () -{ - echo "Executing $LTP_DIR/ftest/ftest04" - time $LTP_DIR/ftest/ftest04 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "ftest04 failed:$(date +%T)" - echo $PASS - fi -} - -run_ftest5 () -{ - echo "Executing $LTP_DIR/ftest/ftest05" - time $LTP_DIR/ftest/ftest05 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "ftest05 failed:$(date +%T)" - echo $PASS - fi -} - -run_ftest6 () -{ - echo "Executing $LTP_DIR/ftest/ftest06" - time $LTP_DIR/ftest/ftest06 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "ftest06 failed:$(date +%T)" - echo $PASS - fi -} - -run_ftest7 () -{ - echo "Executing $LTP_DIR/ftest/ftest07" - time $LTP_DIR/ftest/ftest07 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "ftest07 failed:$(date +%T)" - echo $PASS - fi -} - -run_ftest8 () -{ - echo "Executing $LTP_DIR/ftest/ftest08" - time $LTP_DIR/ftest/ftest08 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? 
-eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "ftest08 failed:$(date +%T)" - echo $PASS - fi -} - -run_ftest () -{ - run_ftest1; - run_ftest2; - run_ftest3; - run_ftest4; - run_ftest5; - run_ftest6; - run_ftest7; - run_ftest8; -} - -run_fsstress () -{ - echo "Executing $LTP_DIR/fsstress/fsstress" - time $LTP_DIR/fsstress/fsstress -d $THIS_TEST_DIR -l 22 -n 22 -p 22 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "fsstress failed:$(date +%T)" - echo $PASS - fi -} - -run_fs_inod () -{ - echo "Executing $LTP_DIR/fs_inod/fs_inod" - time $LTP_DIR/fs_inod/fs_inod $THIS_TEST_DIR 22 22 22 2>>$LOG_FILE 1>>$LOG_FILE - - if [ $? -eq 0 ]; then - let PASS=$PASS+1 - echo $PASS - else - echo "fs_inod failed:$(date +%T)" - echo $PASS - fi -} - -main () -{ - echo "start ltp tests:$(date +%T)"; - run_fs_perms_simpletest; - run_lftest; - run_stream; - run_openfile; - run_inode; - run_ftest; - run_fsstress; - run_fs_inod; - echo "end ltp tests: $(date +%T)"; - echo "total $PASS tests were successful out of $TOTAL tests" -} - -_init && main "$@"
\ No newline at end of file diff --git a/sanity/system_light/scripts/multiple_files.sh b/sanity/system_light/scripts/multiple_files/multiple_files.sh index 8f36b5f..94b859e 100755 --- a/sanity/system_light/scripts/multiple_files.sh +++ b/sanity/system_light/scripts/multiple_files/multiple_files.sh @@ -6,16 +6,16 @@ function main () { mkdir test cd test - + echo "start:`date +%T`" for i in `seq 1 $NUM_OF_FILES` ; do dd if=/dev/zero of=file$i bs=10K count=1 1>/dev/null 2>/dev/null - done + done echo "end:`date +%T`" echo "Creation of $NUM_OF_FILES done" - - TOTAL_FILES=$(ls | wc -l) + + TOTAL_FILES=$(ls | wc -l) if [ $TOTAL_FILES -ne $NUM_OF_FILES ]; then echo "Total files created is not $NUM_OF_FILES" @@ -23,9 +23,9 @@ function main () else err=0 fi - + echo "Removing all the files" - + for i in `seq 1 $NUM_OF_FILES` ; do rm file$i done diff --git a/sanity/system_light/scripts/openssl/openssl.sh b/sanity/system_light/scripts/openssl/openssl.sh new file mode 100755 index 0000000..5289945 --- /dev/null +++ b/sanity/system_light/scripts/openssl/openssl.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +function main() +{ + $TOOLDIR/openssl_run.sh; + if [ $? -eq 0 ]; then + rm -rf openssl* && echo "removed openssl directories and files"; + return 0; + else + return 1; + fi +} + +main "$@"; diff --git a/sanity/system_light/scripts/open.sh b/sanity/system_light/scripts/openssl/openssl_run.sh index 366c3a7..6ec8893 100755 --- a/sanity/system_light/scripts/open.sh +++ b/sanity/system_light/scripts/openssl/openssl_run.sh @@ -6,35 +6,35 @@ function main() { echo "untarring the openssl tarball" echo "start:`date +%T`" - time tar -xvf $OPENSSL_TAR_FILE 2>>$LOG_FILE 1>>$LOG_FILE + time tar -xvf $OPENSSL_TAR_FILE 2>>$LOG_FILE 1>>$LOG_FILE cd $OPENSSL_DIR if [ -z "$PREFIX" -a -z "$OPENSSLDIR" ]; then echo "executing ./config:`date +%T`" - time ./config 2>>$LOG_FILE 1>>$LOG_FILE + time ./config 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -ne 0 ]; then echo "./config failed:`date +%T`" return 11; - fi + fi else echo "executing ./config with prefix:`date +%T`" - time ./config --prefix=$PREFIX --openssldir=$OPENSSLDIR 2>>$LOG_FILE 1>>$LOG_FILE + time ./config --prefix=$PREFIX --openssldir=$OPENSSLDIR 2>>$LOG_FILE 1>>$LOG_FILE if [ $? -ne 0 ]; then echo "config prefix failed:`date +%T`" return 11; fi fi - + echo "executing make:`date +%T`" - time make 2>>$LOG_FILE 1>>$LOG_FILE - if [ $? -ne 0 ]; then + time make 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -ne 0 ]; then echo "make failed:`date +%T`" return 11 fi - + echo "executing make test:`date +%T`" - time make test 2>>$LOG_FILE 1>>$LOG_FILE - if [ $? -ne 0 ]; then + time make test 2>>$LOG_FILE 1>>$LOG_FILE + if [ $? -ne 0 ]; then echo "make test failed:`date +%T`" return 11; else diff --git a/sanity/system_light/scripts/posix_compliance.sh b/sanity/system_light/scripts/posix_compliance.sh deleted file mode 100755 index 69c8dcd..0000000 --- a/sanity/system_light/scripts/posix_compliance.sh +++ /dev/null @@ -1,6 +0,0 @@ -#! /bin/bash - -(time prove -r $DIR/tests "$@" | tee -a /tmp/posix - -grep FAILED /tmp/posix 2> /dev/null) 2>>$LOG_FILE 1>>$LOG_FILE - diff --git a/sanity/system_light/scripts/posix_compliance/posix_compliance.sh b/sanity/system_light/scripts/posix_compliance/posix_compliance.sh new file mode 100755 index 0000000..203c04b --- /dev/null +++ b/sanity/system_light/scripts/posix_compliance/posix_compliance.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +function main () +{ + $TOOLDIR/posix_compliance_run.sh; + if [ $? 
-eq 0 ]; then + rm -rf fstest_* && echo "removed posix compliance directories"; + return 0; + else + return 1; + fi +} + +main "$@"
\ No newline at end of file diff --git a/sanity/system_light/scripts/posix_compliance/posix_compliance_run.sh b/sanity/system_light/scripts/posix_compliance/posix_compliance_run.sh new file mode 100755 index 0000000..c1301ff --- /dev/null +++ b/sanity/system_light/scripts/posix_compliance/posix_compliance_run.sh @@ -0,0 +1,19 @@ +#! /bin/bash + +function main () +{ + echo "start: `date +%T`"; + time prove -r $DIR/tests "$@" | tee -a /tmp/posix 2>/dev/null 1>/dev/null; + cat /tmp/posix && sleep 2; + grep FAILED /tmp/posix 2>>$LOG_FILE 1>>$LOG_FILE + + if [ $? -ne 0 ]; then + echo "end: `date +%T`"; + return 0; + else + echo "end: `date +%T`"; + return 1; + fi +} + +main "$@"
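posix_compliance_run.sh above drives the POSIX test suite through prove and treats the run as passed only when no FAILED lines turn up in the prove output. With DIR=/opt/qa/tools/posix-testsuite from the config, the core command is:

    prove -r /opt/qa/tools/posix-testsuite/tests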
\ No newline at end of file diff --git a/sanity/system_light/scripts/postmark.sh b/sanity/system_light/scripts/postmark/postmark.sh index 51ce440..51ce440 100755 --- a/sanity/system_light/scripts/postmark.sh +++ b/sanity/system_light/scripts/postmark/postmark.sh diff --git a/sanity/system_light/scripts/read_large/read_large.sh b/sanity/system_light/scripts/read_large/read_large.sh new file mode 100755 index 0000000..98d40ce --- /dev/null +++ b/sanity/system_light/scripts/read_large/read_large.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +function main () +{ + $TOOLDIR/read_large_run.sh; + + if [ $? -eq 0 ]; then + rm $PWD/$OF && echo "Removed large file"; + return 0; + else + return 1; + fi +} + +main "$@"
\ No newline at end of file diff --git a/sanity/system_light/scripts/read_large.sh b/sanity/system_light/scripts/read_large/read_large_run.sh index 7a315a0..457581f 100755 --- a/sanity/system_light/scripts/read_large.sh +++ b/sanity/system_light/scripts/read_large/read_large_run.sh @@ -2,6 +2,8 @@ function main() { + dd if=/dev/zero of=$PWD/$OF bs=$BS_SIZE count=$DD_CNT; + echo "start:`date +%T`" time cat $LARGE_FILE_SOURCE > $LARGE_FILE_DEST 2>>$LOG_FILE 1>>$LOG_FILE diff --git a/sanity/system_light/scripts/rpc-fops.sh~ b/sanity/system_light/scripts/rpc-fops.sh~ deleted file mode 100644 index 07d2b89..0000000 --- a/sanity/system_light/scripts/rpc-fops.sh~ +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -function main () -{ - set -x; - SCRIPTS_DIR=$(dirname $0); - - echo "start: $(date +%T)"; - #time $SCRIPTS_PATH/rpc-coverage.sh $THIS_TEST_DIR 2>>$LOG_FILE 1>>$LOG_FILE; - time $SCRIPTS_DIR/rpc-coverage.sh $THIS_TEST_DIR 2>>$LOG_FILE 1>>$LOG_FILE; - if [ $? -ne 0 ]; then - echo "end: $(date +%T)"; - return 22; - else - echo "end: $(date +%T)"; - return 0; - fi -} - -main "$@"
\ No newline at end of file diff --git a/sanity/system_light/scripts/rpc-coverage.sh b/sanity/system_light/scripts/rpc/rpc-coverage.sh index ebb92a2..ebb92a2 100755 --- a/sanity/system_light/scripts/rpc-coverage.sh +++ b/sanity/system_light/scripts/rpc/rpc-coverage.sh diff --git a/sanity/system_light/scripts/rpc-fops.sh b/sanity/system_light/scripts/rpc/rpc.sh index 07ad9d6..07ad9d6 100755 --- a/sanity/system_light/scripts/rpc-fops.sh +++ b/sanity/system_light/scripts/rpc/rpc.sh diff --git a/sanity/system_light/scripts/syscallbench.sh b/sanity/system_light/scripts/syscallbench/syscallbench.sh index c257e41..ac49b23 100755 --- a/sanity/system_light/scripts/syscallbench.sh +++ b/sanity/system_light/scripts/syscallbench/syscallbench.sh @@ -1,7 +1,7 @@ #!/bin/bash function update_tools() -{ +{ cd $TOOL_DIR echo "In $TOOL_DIR" git pull @@ -25,7 +25,7 @@ function main() echo "start:`date +%T`" syscall_test; if [ $? -ne 0 ]; then - echo "end:`date +%T`" + echo "end:`date +%T`" return 11; fi } diff --git a/sanity/system_light/scripts/tiobench.sh b/sanity/system_light/scripts/tiobench/tiobench.sh index 593bf6a..1a86bf3 100755 --- a/sanity/system_light/scripts/tiobench.sh +++ b/sanity/system_light/scripts/tiobench/tiobench.sh @@ -4,12 +4,12 @@ function main () { echo "start:`date +%T`" time $TIO_BIN -d $THIS_TEST_DIR -W -S -c 2>>$LOG_FILE 1>>$LOG_FILE - + if [ $? -ne 0 ]; then - echo "end:`date +%T`" + echo "end:`date +%T`" return 11; else - echo "end:`date +%T`" + echo "end:`date +%T`" return 0; fi } diff --git a/scalability/README b/scalability/README new file mode 100644 index 0000000..2828682 --- /dev/null +++ b/scalability/README @@ -0,0 +1,40 @@ +Clustered mode iozone setup: +---------------------------- +Before continuing reading, please make sure the iozone version is greater +than 3.394 to run iozone in clustered mode. + +Iozone needs either passwordless rsh or passwordless ssh to be setup to run in +clustered mode. +By default, iozone uses rsh. To force iozone to use ssh, the shell variable RSH +needs to be set to 'ssh'. The command line listed earlier uses this feature. + +Contents of the iconf file used [ iozone cluster mode configuration file]: +fusion1 /mnt/fioperf /usr/bin/iozone +fusion1 /mnt/fioperf /usr/bin/iozone +fusion2 /mnt/fioperf /usr/bin/iozone +fusion2 /mnt/fioperf /usr/bin/iozone +fusion3 /mnt/fioperf /usr/bin/iozone +fusion3 /mnt/fioperf /usr/bin/iozone +fusion4 /mnt/fioperf /usr/bin/iozone +fusion4 /mnt/fioperf /usr/bin/iozone +fusion5 /mnt/fioperf /usr/bin/iozone +fusion5 /mnt/fioperf /usr/bin/iozone + +[Note that each entry is repeated twice so that two child processes +could be run on each system.] + +Pitfalls: + +1.When the config file uses IP addresses instead of hostnames, +iozone fails to proceed. For a successful iozone run in clustered mode, +one needs to populate the config file with hostnames. +For name resolution, one could use /etc/hosts. + +2.Even after successfully configuring passwordless rsh/ssh, +if you are unable to run iozone, make sure you flush firewall rules + +To look at the configured firewall rules, one could use: +# iptables -L + +To flush the rules, once could use: +# iptables -F
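The README above assumes passwordless ssh from the driving node to every client named in the iconf file. A minimal sketch of one way to set that up (the fusion hostnames match the example above; logging in as root is an assumption):

    # ssh-keygen -t rsa
    # ssh-copy-id root@fusion1
      ...repeat for fusion2 through fusion5...
    # ssh fusion1 true    (should return without prompting for a password)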
\ No newline at end of file diff --git a/scalability/iconf b/scalability/iconf new file mode 100644 index 0000000..6d4a66c --- /dev/null +++ b/scalability/iconf @@ -0,0 +1,5 @@ +#Example config file +Engg.client1 /data/perfmount /data/iozone +Engg.client14 /data/perfmount /data/iozone +Engg.client15 /data/perfmount /data/iozone +Engg.client16 /data/perfmount /data/iozone
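Because iozone in clustered mode needs hostnames rather than IP addresses, every entry in iconf must resolve on all nodes. A sketch of matching /etc/hosts entries for the example config above (the addresses are made up for illustration):

    192.168.1.11  Engg.client1
    192.168.1.14  Engg.client14
    192.168.1.15  Engg.client15
    192.168.1.16  Engg.client16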
\ No newline at end of file diff --git a/scalability/run_iozone b/scalability/run_iozone new file mode 100755 index 0000000..5d0457c --- /dev/null +++ b/scalability/run_iozone @@ -0,0 +1,11 @@ +#!/bin/bash + +for size in 500m 1g 10g +do + for bs in 16k 32k 64k 128k 256k 512k + do + echo "Filesize - $size, blksize - $bs" + RSH="ssh" iozone -t 4 -i 0 -i 1 -r $bs -s $size -+m iconf -b iozone.$bs-$size.xls + sleep 10 + done +done
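Assuming the iconf file sits next to run_iozone and passwordless ssh is already configured as sketched above, a run would look roughly like this; the -b option makes iozone write one Excel-compatible result file per block-size/file-size pair into the working directory:

    # cd /opt/qa/scalability    (illustrative path)
    # ./run_iozone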