summaryrefslogtreecommitdiffstats
path: root/tests/functional/glusterd/test_remove_brick_scenarios.py
blob: e7ff27b55609a5fa167f4eb387e5a1feebb74e78 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
#  Copyright (C) 2018  Red Hat, Inc. <http://www.redhat.com>
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License along
#  with this program; if not, write to the Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.brick_ops import remove_brick
from glustolibs.gluster.brick_libs import get_all_bricks
from glustolibs.gluster.rebalance_ops import (rebalance_start,
                                              rebalance_status,
                                              wait_for_fix_layout_to_complete)
from glustolibs.gluster.glusterdir import mkdir
from glustolibs.gluster.glusterfile import get_fattr


@runs_on([['distributed-replicated'], ['glusterfs']])
class TestRemoveBrickScenarios(GlusterBaseClass):
    """Validate positive and negative remove-brick scenarios on a 2x3
    distributed-replicated volume: remove-brick stop/status must fail for
    brick sets that have no remove-brick in progress and for non-existent
    bricks, and must succeed for the set where remove-brick was started.
    """

    @classmethod
    def setUpClass(cls):
        # Calling GlusterBaseClass setUpClass
        GlusterBaseClass.setUpClass.im_func(cls)

        # Override the volume schema: a 2x3 (dist_count=2, replica=3)
        # layout gives two full replica sets of 3 bricks each, so a whole
        # replica set can be targeted by remove-brick in the test below.
        cls.volume['voltype'] = {
            'type': 'distributed-replicated',
            'dist_count': 2,
            'replica_count': 3,
            'transport': 'tcp'}

    def setUp(self):
        # calling GlusterBaseClass setUp
        GlusterBaseClass.setUp.im_func(self)

        # Create and mount the volume; abort the whole test on failure.
        ret = self.setup_volume_and_mount_volume(self.mounts)
        if not ret:
            raise ExecutionError("Volume creation or mount failed: %s"
                                 % self.volname)
        g.log.info("Volume created and mounted successfully : %s",
                   self.volname)

    def tearDown(self):

        # Unmount and delete the volume so every test starts clean.
        ret = self.unmount_volume_and_cleanup_volume(self.mounts)
        if not ret:
            raise ExecutionError("Unable to delete volume %s" % self.volname)
        g.log.info("Volume deleted successfully : %s", self.volname)

        GlusterBaseClass.tearDown.im_func(self)

    def test_remove_brick_scenarios(self):
        # pylint: disable=too-many-statements
        """
        Test case:
        1. Create a cluster by peer probing and create a volume.
        2. Mount it and write some IO like 100000 files.
        3. Initiate the remove-brick operation on pair of bricks.
        4. Stop the remove-brick operation using other pairs of bricks.
        5. Get the remove-brick status using other pair of bricks in
           the volume.
        6. stop the rebalance process using non-existing brick.
        7. Check for the remove-brick status using non-existent bricks.
        8. Stop the remove-brick operation where remove-brick start have been
            initiated.
        9. Perform fix-layout on the volume.
        10. Get the rebalance fix-layout.
        11. Create a directory from mountpoint.
        12. check for 'trusted.glusterfs.dht' extended attribute in the
            newly created directory in the bricks where remove brick stopped
            (which was tried to be removed in step 8).
        13. Umount, stop and delete the volume.
        """

        # Getting a list of all the bricks.
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Running IO: create 100000 empty files on the mount point so
        # rebalance/remove-brick has data to operate on.
        command = ('for number in `seq 1 100000`;do touch ' +
                   self.mounts[0].mountpoint + '/test_file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation: failed.")
        g.log.info("Files create on mount point.")

        # Start remove-brick on the second replica set (bricks 3-5).
        remove_brick_list_original = bricks_list[3:6]
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_original, 'start')
        self.assertEqual(ret, 0, "Failed to start remove brick operation.")
        g.log.info("Remove bricks operation started successfully.")

        # Stopping remove-brick on a DIFFERENT replica set must fail
        # (no remove-brick is in progress on those bricks).
        remove_brick_list_other_pair = bricks_list[0:3]
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_other_pair, 'stop')
        self.assertEqual(ret, 1, "Successfully stopped remove brick operation "
                         "on other pair of bricks.")
        g.log.info("Failed to stop remove brick operation on"
                   " other pair of bricks.")

        # Status of remove-brick on the other replica set must fail too.
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_other_pair, 'status')
        self.assertEqual(ret, 1, "Error: Got status on other pair of bricks.")
        g.log.info("EXPECTED: Failed to get status on other pair of bricks.")

        # Stopping remove-brick on non-existent bricks must fail.
        remove_brick_list_non_existent = [
            brick + 'non-existent' for brick in bricks_list[0:3]]
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_non_existent, 'stop')
        self.assertEqual(ret, 1, "Error: Successfully stopped remove brick"
                         " operation on non-existent bricks.")
        g.log.info("EXPECTED: Failed to stop remove brick operation"
                   " on non existent bricks.")

        # Status of remove-brick on non-existent bricks must fail too.
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_non_existent, 'status')
        self.assertEqual(ret, 1,
                         "Error: Status on non-existent bricks successful.")
        g.log.info("EXPECTED: Failed to get status on non existent bricks.")

        # Stopping the initial remove-brick operation must succeed, since
        # remove-brick start was issued on exactly these bricks.
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_original, 'stop')
        self.assertEqual(ret, 0, "Failed to stop remove brick operation")
        g.log.info("Remove bricks operation stop successfully")

        # Start rebalance fix layout for volume.
        g.log.info("Starting Fix-layout on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname, fix_layout=True)
        self.assertEqual(ret, 0, "Failed to start rebalance for fix-layout"
                         " on the volume %s" % self.volname)
        g.log.info("Successfully started fix-layout on the volume %s",
                   self.volname)

        # Checking status of rebalance fix layout for the volume.
        ret, _, _ = rebalance_status(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to check status of rebalance"
                         " on the volume %s" % self.volname)
        g.log.info("Successfully checked status on the volume %s",
                   self.volname)
        ret = wait_for_fix_layout_to_complete(self.mnode,
                                              self.volname, timeout=30000)
        self.assertTrue(ret, "Failed to check for rebalance.")
        g.log.info("Rebalance completed.")

        # Create a fresh directory from the mount point; retry up to 10
        # candidate names in case one already exists from a previous run.
        dir_name = ''
        for counter in range(10):
            ret = mkdir(self.mounts[0].client_system,
                        self.mounts[0].mountpoint + "/dir1" + str(counter),
                        parents=True)
            if ret:
                dir_name = "/dir1" + str(counter)
                break
        self.assertTrue(ret, "Failed to create directory dir1.")
        g.log.info("Directory dir1 created successfully.")

        # The new directory must carry the trusted.glusterfs.dht xattr on
        # a brick from the replica set whose removal was stopped, proving
        # the bricks were re-included in the layout.
        brick_server, brick_dir = bricks_list[0].split(':')
        folder_name = brick_dir + dir_name
        g.log.info("Check trusted.glusterfs.dht on host  %s for directory %s",
                   brick_server, folder_name)
        ret = get_fattr(brick_server, folder_name, 'trusted.glusterfs.dht')
        self.assertTrue(ret, "Failed to get trusted.glusterfs.dht for %s"
                        % folder_name)
        g.log.info("Get trusted.glusterfs.dht xattr for %s successfully",
                   folder_name)