summaryrefslogtreecommitdiffstats
path: root/tests/functional/arbiter/test_mount_point_while_deleting_files.py
blob: e1ac94ac567cd9b8371e3b477ff50a0060ad00d8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
#  Copyright (C) 2016-2020  Red Hat, Inc. <http://www.redhat.com>
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License along
#  with this program; if not, write to the Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os

from glusto.core import Glusto as g

from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.glusterdir import rmdir
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume
from glustolibs.gluster.volume_ops import get_volume_list
from glustolibs.gluster.brick_libs import (bring_bricks_offline,
                                           select_bricks_to_bring_offline)
from glustolibs.io.utils import (validate_io_procs,
                                 list_all_files_and_dirs_mounts,
                                 wait_for_io_to_complete)
from glustolibs.gluster.mount_ops import (mount_volume,
                                          umount_volume,
                                          create_mount_objs)
from glustolibs.misc.misc_libs import upload_scripts


@runs_on([['arbiter'],
          ['glusterfs']])
class VolumeSetDataSelfHealTests(GlusterBaseClass):
    """Verify that deleting files from two arbiter volumes mounted on the
    same client completes even while a brick of each volume is offline
    (i.e. the mount point does not turn read-only / EROFS).
    """

    @classmethod
    def setUpClass(cls):
        """Upload IO scripts, define two arbiter volumes, then create and
        mount both volumes on the first client.

        Raises:
            ExecutionError: on script upload, volume setup or mount failure.
        """
        # Calling GlusterBaseClass setUpClass
        cls.get_super_method(cls, 'setUpClass')()

        # Upload io scripts for running IO on mounts
        g.log.info("Upload io scripts to clients %s for running IO on mounts",
                   cls.clients)
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, cls.script_upload_path)
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s"
                                 % cls.clients)
        g.log.info("Successfully uploaded IO scripts to clients %s",
                   cls.clients)

        # Setup Volumes
        cls.volume_configs = []
        cls.mounts_dict_list = []

        # Define two replicated volumes
        for i in range(1, 3):
            volume_config = {
                'name': 'testvol_%s_%d' % (cls.volume['voltype']['type'], i),
                'servers': cls.servers,
                'voltype': cls.volume['voltype']}
            cls.volume_configs.append(volume_config)

            # Redefine mounts: one mount entry per client per volume, with a
            # distinct mountpoint derived from the volume name.
            for client in cls.all_clients_info:
                mount = {
                    'protocol': cls.mount_type,
                    'server': cls.mnode,
                    'volname': volume_config['name'],
                    'client': cls.all_clients_info[client],
                    'mountpoint': (os.path.join(
                        "/mnt", '_'.join([volume_config['name'],
                                          cls.mount_type]))),
                    'options': ''
                    }
                cls.mounts_dict_list.append(mount)

        # BUGFIX: build the mount objects once, after the complete dict list
        # is assembled, instead of rebuilding them on every loop iteration.
        cls.mounts = create_mount_objs(cls.mounts_dict_list)

        # Create and mount volumes
        cls.mount_points = []
        cls.client = cls.clients[0]
        for volume_config in cls.volume_configs:

            # Setup volume
            ret = setup_volume(mnode=cls.mnode,
                               all_servers_info=cls.all_servers_info,
                               volume_config=volume_config,
                               force=False)
            if not ret:
                raise ExecutionError("Failed to setup Volume %s"
                                     % volume_config['name'])
            g.log.info("Successful in setting volume %s",
                       volume_config['name'])

            # Mount volume
            mount_point = (os.path.join("/mnt", '_'.join(
                [volume_config['name'], cls.mount_type])))
            cls.mount_points.append(mount_point)
            ret, _, _ = mount_volume(volume_config['name'],
                                     cls.mount_type,
                                     mount_point,
                                     cls.mnode, cls.client)
            if ret:
                # BUGFIX: report the volume actually being mounted rather
                # than the unrelated base-class 'cls.volname'.
                raise ExecutionError(
                    "Failed to do gluster mount on volume %s "
                    % volume_config['name'])
            g.log.info("Successfully mounted %s on client %s",
                       volume_config['name'], cls.client)

    def setUp(self):
        """
        setUp method for every test
        """

        # calling GlusterBaseClass setUp
        self.get_super_method(self, 'setUp')()

        # Async IO processes started by the test; tearDown waits on these
        # if the test did not get as far as validating them itself.
        self.all_mounts_procs = []
        self.io_validation_complete = False

    def tearDown(self):
        """
        If test method failed before validating IO, tearDown waits for the
        IO's to complete and checks for the IO exit status

        Cleanup and umount volume
        """
        if not self.io_validation_complete:
            g.log.info("Wait for IO to complete as IO validation did not "
                       "succeed in test method")
            ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
            if not ret:
                raise ExecutionError("IO failed on some of the clients")
            g.log.info("IO is successful on all mounts")

            # List all files and dirs created
            g.log.info("List all files and directories:")
            ret = list_all_files_and_dirs_mounts(self.mounts)
            if not ret:
                raise ExecutionError("Failed to list all files and dirs")
            g.log.info("Listing all files and directories is successful")

    @classmethod
    def tearDownClass(cls):
        """
        Umount volumes from the client, remove the mount directories and
        clean up all volumes.

        Raises:
            ExecutionError: on umount, directory removal or cleanup failure.
        """
        # umount all volumes
        for mount_obj in cls.mounts:
            ret, _, _ = umount_volume(
                mount_obj.client_system, mount_obj.mountpoint)
            if ret:
                # BUGFIX: name the volume being unmounted instead of the
                # unrelated base-class 'cls.volname'.
                raise ExecutionError(
                    "Failed to umount on volume %s "
                    % mount_obj.volname)
            g.log.info("Successfully umounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
            ret = rmdir(mount_obj.client_system, mount_obj.mountpoint)
            if not ret:
                # BUGFIX: drop the stray 'ret' positional argument that was
                # passed to ExecutionError and fix the doubled word in the
                # message.
                raise ExecutionError(
                    "Failed to remove mount directory %s"
                    % mount_obj.mountpoint)
            g.log.info("Mount directory is removed successfully")

        # stopping all volumes
        g.log.info("Starting to Cleanup all Volumes")
        volume_list = get_volume_list(cls.mnode)
        for volume in volume_list:
            ret = cleanup_volume(cls.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup Volume %s" % volume)
            g.log.info("Volume: %s cleanup is done", volume)
        g.log.info("Successfully Cleanedup all Volumes")

        # calling GlusterBaseClass tearDownClass
        cls.get_super_method(cls, 'tearDownClass')()

    def test_mount_point_not_go_to_rofs(self):
        """
        - create two volumes with arbiter1 and mount it on same client
        - create IO
        - start deleting files from both mountpoints
        - kill brick from one of the node
        - Check if all the files are deleted from the mount point
        from both the servers
        """
        # pylint: disable=too-many-locals,too-many-statements
        # create files on all mounts
        g.log.info("Starting IO on all mounts...")
        for mount_obj in self.mounts:
            g.log.info("Generating data for %s:%s",
                       mount_obj.client_system, mount_obj.mountpoint)
            # Create files
            g.log.info('Creating files...')
            command = ("/usr/bin/env python %s create_files "
                       "-f 100 "
                       "--fixed-file-size 1M "
                       "%s" % (
                           self.script_upload_path,
                           mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system, command,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)

        # Validate IO
        self.assertTrue(
            validate_io_procs(self.all_mounts_procs, self.mounts),
            "IO failed on some of the clients")

        # select bricks to bring offline: one candidate set per volume, so
        # each volume loses a brick before the deletes start
        volume_list = get_volume_list(self.mnode)
        for volname in volume_list:
            bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
                self.mnode, volname))
            bricks_to_bring_offline = (
                bricks_to_bring_offline_dict['volume_bricks'])

            # bring bricks offline
            g.log.info("Going to bring down the brick process for %s",
                       bricks_to_bring_offline)
            ret = bring_bricks_offline(volname, bricks_to_bring_offline)
            self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                                  "check the log file for more details."))
            g.log.info("Brought down the brick process for %s successfully",
                       bricks_to_bring_offline)

        # delete files on all mounts; this must succeed even with the bricks
        # offline (the mount must not turn read-only)
        g.log.info("Deleting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Deleting data for %s:%s",
                       mount_obj.client_system, mount_obj.mountpoint)
            # Delete files
            g.log.info('Deleting files...')
            command = "/usr/bin/env python %s delete %s" % (
                self.script_upload_path,
                mount_obj.mountpoint)
            proc = g.run_async(mount_obj.client_system, command,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)

        # Validate IO
        self.assertTrue(
            validate_io_procs(self.all_mounts_procs, self.mounts),
            "IO failed on some of the clients")