From 18445ae1a94366c955cc7626fb8ec749dedcf73e Mon Sep 17 00:00:00 2001
From: Shwetha-H-Panduranga
Date: Tue, 6 Dec 2011 14:11:13 +0530
Subject: Adding New/Updated Automation Files

---
 TestUnits/replicate/self_heal/Main.py       | 69 ++++++++++++++++++++
 TestUnits/replicate/self_heal/testcases.py  | 96 ++++++++++++++++++++++++++++
 TestUnits/replicate/self_heal/testcaseslist |  9 +++
 TestUnits/replicate/self_heal/testenv.cfg   | 98 +++++++++++++++++++++++++++++
 4 files changed, 272 insertions(+)
 create mode 100644 TestUnits/replicate/self_heal/Main.py
 create mode 100644 TestUnits/replicate/self_heal/testcases.py
 create mode 100644 TestUnits/replicate/self_heal/testcaseslist
 create mode 100644 TestUnits/replicate/self_heal/testenv.cfg

diff --git a/TestUnits/replicate/self_heal/Main.py b/TestUnits/replicate/self_heal/Main.py
new file mode 100644
index 0000000..4e37265
--- /dev/null
+++ b/TestUnits/replicate/self_heal/Main.py
@@ -0,0 +1,69 @@
+"""Main module for the testunit.
+
+This module's "main" function is called from atfexecute to execute the testunit.
+"""
+import parser
+import atfutils
+import glusterutils
+import managerutils
+import testcases
+
+def initialize(filename):
+    """Parse the testenv config file and ssh-connect to all hosts.
+
+    Returns 0 on success, 1 on failure.
+    """
+    return_status = 1
+    if parser.parse_testenv_configfile(filename):
+        return return_status
+    if managerutils.ssh_connect_allhosts():
+        return return_status
+
+    return 0
+
+def setup():
+    """Set the volume under test as the active volume.
+
+    Returns 0 on success, 1 on failure.
+    """
+    return_status = 1
+    if atfutils.set_active_volume("volume1"):
+        return return_status
+    return 0
+
+def execute(*testcaselist):
+    """Run every testcase named in testcaselist and print a pass/fail summary.
+    """
+    passedtestcases = 0
+    failedtestcases = 0
+    selectedtestcases = len(testcaselist)
+
+    for testcase in testcaselist:
+        function_obj = getattr(testcases, testcase, None)
+        if function_obj:
+            print "Starting Test: ' %s '" % testcase
+            return_status = function_obj()
+            if return_status:
+                print "TestCase ' %s ' Failed" % testcase
+                failedtestcases += 1
+            else:
+                print "TestCase ' %s ' Passed" % testcase
+                passedtestcases += 1
+            print "Ending Test: ' %s '" % testcase
+        else:
+            print "TestCase %s not defined in 'testcases' module" % testcase
+            continue
+
+    print "Selected %d : Passed %d, Failed %d" % (selectedtestcases,
+                                                  passedtestcases,
+                                                  failedtestcases)
+
+def cleanup():
+    """Placeholder for testunit-level cleanup."""
+    pass
+
+def main(testenvfile, *testcaselist):
+    """Entry point called from atfexecute: initialize, setup, execute, cleanup."""
+    if initialize(testenvfile):
+        return 1
+    if setup():
+        return 1
+    execute(*testcaselist)
+    cleanup()
+    return 0
diff --git a/TestUnits/replicate/self_heal/testcases.py b/TestUnits/replicate/self_heal/testcases.py
new file mode 100644
index 0000000..0015e5f
--- /dev/null
+++ b/TestUnits/replicate/self_heal/testcases.py
@@ -0,0 +1,96 @@
+"""testcases for replicate/self-heal
+"""
+
+import sys
+import time
+import hostutils
+import managerutils
+import glusterutils
+import atfutils
+import clientutils
+import serverutils
+import parser
+
+def reset_testenv():
+    """Unmount all clients and wipe the volume, glusterd state and logs on all
+    servers so that every testcase starts from a clean slate.
+    """
+    return_status = 1
+    if clientutils.umountall():
+        return return_status
+    glusterutils.volume_stop("server1", force=True)
+    glusterutils.volume_delete("server1")
+    glusterutils.glusterd_stop_allservers()
+    glusterutils.glusterd_remove_dir_allservers()
+    glusterutils.glusterd_remove_logs_allservers()
+    return 0
+
+def setup_testenv():
+    """Start glusterd on all servers, create and start the replicate volume,
+    and mount it on all clients. Returns 0 on success, 1 on failure.
+    """
+    return_status = 1
+    if glusterutils.glusterd_start_allservers(force=True):
+        return return_status
+    if glusterutils.peer_probe("server1"):
+        return return_status
+    if glusterutils.create_brick_allservers():
+        return return_status
+    if glusterutils.volume_create("server1"):
+        return return_status
+    glusterutils.volume_set("server1",
+                            key="diagnostics.client-log-level", value="DEBUG")
+    if glusterutils.volume_start("server1"):
+        return return_status
+    if clientutils.mountall():
+        return return_status
+    return 0
+
+def test001():
+    """
+    Description: Test that lookup of an entry that does not exist fails on the
+    mount.
+    """
+    return_status = 1
+    if reset_testenv():
+        return return_status
+    if setup_testenv():
+        return return_status
+    return_status = clientutils.execute_on_mount("mount1", "ls abcd")
+
+    # The lookup is expected to fail; a non-zero return status means the test
+    # passed.
+    if return_status:
+        test_status = 0
+    else:
+        test_status = 1
+
+    return test_status
+
+def test002():
+    """
+    Description: Test for ESTALE when the file handle becomes stale because the
+    file's trusted.gfid is overwritten directly on the brick.
+    """
+    return_status = 1
+    if reset_testenv():
+        return return_status
+    if setup_testenv():
+        return return_status
+
+    clientutils.execute_on_mount("mount1", "touch file")
+    serverutils.execute_on_brick("brick1",
+            "setfattr -n trusted.gfid -v 0sBfz5vAdHTEK1GZ99qjqTIg== file")
+    return_status = clientutils.execute_on_mount("mount1", "find file | xargs stat")
+
+    # The stat is expected to fail with ESTALE; a non-zero return status means
+    # the test passed.
+    if return_status:
+        test_status = 0
+    else:
+        test_status = 1
+
+    return test_status
diff --git a/TestUnits/replicate/self_heal/testcaseslist b/TestUnits/replicate/self_heal/testcaseslist
new file mode 100644
index 0000000..ba90492
--- /dev/null
+++ b/TestUnits/replicate/self_heal/testcaseslist
@@ -0,0 +1,9 @@
+#################################################################
+# List of testcases for the self-heal feature of afr.
+# TestCaseId : Version : Keyword
+#################################################################
+test001 : >= 3.2 : art
+test002 : >= 3.2 : art
+
diff --git a/TestUnits/replicate/self_heal/testenv.cfg b/TestUnits/replicate/self_heal/testenv.cfg
new file mode 100644
index 0000000..a8b15c8
--- /dev/null
+++ b/TestUnits/replicate/self_heal/testenv.cfg
@@ -0,0 +1,98 @@
+[DEFAULT]
+user = root
+password = syst3m
+glusterversion = 3.2.5
+installpath =
+downloadpath =
+
+# ExportDir Section
+# Necessary Options: dir
+# Optional: fstype, device
+[export1]
+dir = /export
+fstype = xfs
+device = /dev/sdb1
+
+[export2]
+dir = /export
+fstype = xfs
+device = /dev/sda1
+
+# Server Section
+# Necessary Options: hostname, username, password, glusterversion.
+# username, password and glusterversion default from the [DEFAULT] section
+# and can be overridden here.
+# Optional: installpath
+[server1]
+hostname = 10.1.11.110
+
+[server2]
+hostname = 10.1.11.111
+
+# Brick Section
+# Necessary Options: hostname, path
+[brick1]
+hostname = server1.hostname
+path = export1
+
+[brick2]
+hostname = server2.hostname
+path = export2
+
+# Volume Section
+# Necessary Options: volumename, volumetype, count, transporttype, bricks
+[volume1]
+volumename = replicate
+volumetype = replica
+count = 2
+transporttype = tcp
+bricks = brick1, brick2
+
+# Client Section
+# Necessary Options: hostname, username, password, glusterversion.
+# username, password and glusterversion default from the [DEFAULT] section
+# and can be overridden here.
+# Optional: installpath
+[client1]
+hostname = 10.1.11.109
+
+# MountDevice Section
+# Necessary Options: hostname, volumename
+# The hostname can also be a VIP; it need not be a server hostname.
+# In the general case (without NFS) we use the first server from the server pool.
+# The volume named in this section is the "active_volume" that all clients
+# will mount. The active volume and hostname can be changed during a testrun.
+[mountdevice1]
+hostname = server1.hostname
+volumename = volume1.volumename
+
+# Mount Section
+# addMount(dir, type, client, device=master.volume, logfile=None)
+[mount1]
+dir = /mnt/replicate1
+client = client1
+device = mountdevice1
+type = glusterfs
+logfile =
+options =
+
+[mount2]
+dir = /mnt/replicate2
+client = client1
+device = mountdevice1
+type = glusterfs
+
+[mount3]
+dir = /mnt/replicate3
+client = client1
+device = mountdevice1
+type = glusterfs
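
New testcases plug into this framework by following the test001/test002 pattern above: reset and rebuild the environment, drive I/O through a mount defined in testenv.cfg, and return 0 for pass and non-zero for fail (Main.execute() treats a non-zero return as a failure). The sketch below is a hypothetical test003 written against that pattern; the scenario is illustrative only, and to be picked up it would also need a "test003 : >= 3.2 : art" line in testcaseslist.

# Hypothetical addition to testcases.py -- not part of this patch.
def test003():
    """
    Description: Test that a file created on one mount is visible on a second
    mount of the same replicate volume.
    """
    return_status = 1
    if reset_testenv():
        return return_status
    if setup_testenv():
        return return_status

    # Create a file through the first mount ...
    if clientutils.execute_on_mount("mount1", "touch shared_file"):
        return return_status
    # ... and stat it through the second mount of the same volume.
    return_status = clientutils.execute_on_mount("mount2", "stat shared_file")

    # Here the command is expected to succeed, so a zero return status is a pass.
    if return_status:
        test_status = 1
    else:
        test_status = 0

    return test_status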
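The atfexecute driver that consumes these files is not part of this change, so the snippet below is only an assumed, minimal way of exercising the testunit by calling Main.main() directly: it wires together the testenv.cfg path and the testcase names listed in testcaseslist. The script name and hard-coded paths are illustrative, not taken from the patch.

# run_self_heal.py -- hypothetical stand-in for atfexecute, for illustration only.
import Main

if __name__ == "__main__":
    # Main.main(testenvfile, *testcaselist) parses the config, sets up the
    # replicate volume and runs the named testcases from testcases.py.
    Main.main("TestUnits/replicate/self_heal/testenv.cfg", "test001", "test002")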