summaryrefslogtreecommitdiffstats
path: root/tests/functional/bvt/test_bvt_lite_and_plus.py
blob: 074d57910c23cf73e6e088a69464505d5e2474e2 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
#!/usr/bin/env python
#  Copyright (C) 2015-2016  Red Hat, Inc. <http://www.redhat.com>
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License along
#  with this program; if not, write to the Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import (GlusterVolumeBaseClass,
                                                   runs_on)


@runs_on([['replicated', 'distributed', 'distributed-replicated',
           'dispersed', 'distributed-dispersed'],
          ['glusterfs', 'nfs', 'cifs']])
class BvtTestsClass(GlusterVolumeBaseClass):
    """Class containing case for : BVT Lite and BVT Plus.

    BVT Lite: Run the case on dis-rep volume with glusterfs, nfs, cifs
        protocols

    BVT Plus: Run the case on all volume types and all protocol types
        combinations
    """
    @classmethod
    def setUpClass(cls):
        """Setup Volume and Mounts, then push the IO helper script to
        every client.

        Raises:
            Exception: if the IO script is missing on the test runner.
        """
        g.log.info("Starting %s:", cls.__name__)
        # '__func__' (not the Python2-only 'im_func') works on both
        # Python 2.6+ and Python 3: it calls the base-class classmethod's
        # underlying function explicitly with this subclass as 'cls'.
        GlusterVolumeBaseClass.setUpClass.__func__(cls)

        # Path of the helper script on the runner, and the path it is
        # uploaded to on each client.
        cls.script_local_path = ("/usr/share/glustolibs/io/"
                                 "scripts/file_dir_ops.py")
        cls.script_upload_path = "/tmp/file_dir_ops.py"
        if not os.path.exists(cls.script_local_path):
            raise Exception("Unable to find the io scripts")

        for client in cls.clients:
            g.upload(client, cls.script_local_path, cls.script_upload_path)
            # The 'ls -l' runs only record the permissions before/after
            # making the uploaded script executable.
            g.run(client, "ls -l %s" % cls.script_upload_path)
            g.run(client, "chmod +x %s" % cls.script_upload_path)
            g.run(client, "ls -l %s" % cls.script_upload_path)

    def setUp(self):
        """No per-test setup; everything happens in setUpClass."""
        pass

    def test_bvt(self):
        """Test IO from the mounts.

        Steps:
            1. Log mount/df/ls/stat diagnostics for every mountpoint.
            2. Run 'create_deep_dirs_with_files' on all mounts
               concurrently; fail if any client reports a non-zero exit.
            3. Recursively stat everything created, again concurrently on
               all mounts; fail on any error.
        """
        g.log.info("Starting Test: %s on %s %s",
                   self.id(), self.volume_type, self.mount_type)

        # Get stat of mount before the IO (diagnostic only; the command
        # output is captured in the glusto logs, return values unused).
        for mount_obj in self.mounts:
            for cmd_fmt in ("mount | grep %s", "df -h %s",
                            "ls -ld %s", "stat %s"):
                g.run(mount_obj.client_system,
                      cmd_fmt % mount_obj.mountpoint)

        # Start IO on all mounts. Each mount gets a distinct
        # --dirname-start-num so clients do not collide on dir names.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" % (self.script_upload_path,
                                            count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Collect IO status from every async process before failing, so
        # all broken clients get logged rather than just the first one.
        rc = True
        for i, proc in enumerate(all_mounts_procs):
            ret, _, _ = proc.async_communicate()
            if ret != 0:
                g.log.error("IO Failed on %s:%s",
                            self.mounts[i].client_system,
                            self.mounts[i].mountpoint)
                rc = False
        # unittest assertion instead of a bare 'assert': bare asserts are
        # stripped under 'python -O', which would silently pass the test.
        self.assertTrue(rc, "IO failed on some of the clients")

        # Get stat of all the files/dirs created.
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("python %s stat "
                   "-R %s" % (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
        rc = True
        for i, proc in enumerate(all_mounts_procs):
            ret, _, _ = proc.async_communicate()
            if ret != 0:
                g.log.error("Stat of files and dirs under %s:%s Failed",
                            self.mounts[i].client_system,
                            self.mounts[i].mountpoint)
                rc = False
        self.assertTrue(rc, "Stat failed on some of the clients")

    def tearDown(self):
        """No per-test teardown; cleanup happens in tearDownClass."""
        pass

    @classmethod
    def tearDownClass(cls):
        """Cleanup mount and Cleanup the volume
        """
        # '__func__' instead of the Python2-only 'im_func' (see setUpClass).
        GlusterVolumeBaseClass.tearDownClass.__func__(cls)