path: root/tests/functional/glusterd/test_reserve_limt_change_while_rebalance.py
#  Copyright (C) 2020  Red Hat, Inc. <http://www.redhat.com>
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License along
#  with this program; if not, write to the Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.glusterdir import mkdir
from glustolibs.gluster.rebalance_ops import (
    rebalance_start,
    rebalance_stop,
    wait_for_rebalance_to_complete
)
from glustolibs.gluster.volume_libs import expand_volume
from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.io.utils import run_linux_untar, wait_for_io_to_complete


@runs_on([['distributed-replicated'], ['glusterfs']])
class TestReserveLimitChangeWhileRebalance(GlusterBaseClass):

    def _set_vol_option(self, option):
        """Set the given volume option(s) and assert success"""
        ret = set_volume_options(
            self.mnode, self.volname, option)
        self.assertTrue(ret, "Failed to set volume option(s) %s on volume %s"
                        % (option, self.volname))

    @classmethod
    def setUpClass(cls):
        # Calling GlusterBaseClass setUpClass
        cls.get_super_method(cls, 'setUpClass')()

        # Flag tracking whether the untar I/O load has been started; checked
        # in tearDown so that cleanup waits for the I/O to finish
        cls.is_io_running = False

        # Setup Volume and Mount Volume
        ret = cls.setup_volume_and_mount_volume(mounts=cls.mounts)
        if not ret:
            raise ExecutionError("Failed to Setup_Volume and Mount_Volume")

    def tearDown(self):
        # Wait for the untar I/O started in the test case to finish before
        # attempting any cleanup
        if self.is_io_running:
            if not wait_for_io_to_complete(self.list_of_io_processes,
                                           [self.mounts[0]]):
                raise ExecutionError("IO failed on some of the clients")

        # A rebalance that is still in progress blocks 'volume stop', so
        # wait for it to complete before cleaning up the volume
        if not wait_for_rebalance_to_complete(
                self.mnode, self.volname, timeout=300):
            raise ExecutionError(
                "Failed to complete rebalance on volume '{}'".format(
                    self.volname))

        # Unmounting and cleaning volume
        ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
        if not ret:
            raise ExecutionError("Unable to delete volume % s" % self.volname)

        self.get_super_method(self, 'tearDown')()

    def test_reserve_limt_change_while_rebalance(self):
        """
        1) Create a distributed-replicated volume and start it.
        2) Enable the storage.reserve option on the volume:
           gluster volume set <volname> storage.reserve 50
        3) Mount the volume on a client.
        4) Add some data on the mount point (within the reserve limit).
        5) Add-brick and trigger rebalance. While rebalance is in progress,
           change the reserve limit to a lower value, say 30.
        6) Stop the rebalance.
        7) Reset the storage.reserve value to 50, as in step 2.
        8) Trigger rebalance again.
        9) While rebalance is in progress, change the reserve limit to a
           higher value, say 70.
        """

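        # storage.reserve makes the posix translator keep the configured
        # percentage of brick space free; writes fail with ENOSPC once a
        # brick crosses the limit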
        # Setting storage.reserve 50
        self._set_vol_option({"storage.reserve": "50"})

        self.list_of_io_processes = []
        # Create a dir to start untar
        self.linux_untar_dir = "{}/{}".format(self.mounts[0].mountpoint,
                                              "linuxuntar")
        ret = mkdir(self.clients[0], self.linux_untar_dir)
        self.assertTrue(ret, "Failed to create dir linuxuntar for untar")

        # Start linux untar on dir linuxuntar
        ret = run_linux_untar(self.clients[0], self.mounts[0].mountpoint,
                              dirs=("linuxuntar",))
        self.list_of_io_processes += ret
        self.is_io_running = True
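        # The untar keeps running in the background so the rebalance below
        # has a live, growing data set to migrate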

        # Add bricks to the volume
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, "Failed to add brick with rsync on volume %s"
                        % self.volname)

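        # Newly added bricks only extend the volume's layout; a rebalance
        # is needed to migrate existing data onto them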
        # Trigger rebalance on the volume
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
                         % self.volname)

        # Lower the reserve limit to 30 while rebalance is in progress
        self._set_vol_option({"storage.reserve": "30"})

        # Stopping Rebalance
        ret, _, _ = rebalance_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to stop rebalance on the volume %s"
                         % self.volname)

        # Reset storage.reserve to 50, as in step 2
        self._set_vol_option({"storage.reserve": "50"})

        # Trigger rebalance on the volume
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
                         % self.volname)

        # Raise the reserve limit to 70 while rebalance is in progress
        self._set_vol_option({"storage.reserve": "70"})
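
        # No explicit wait here: tearDown verifies that this final
        # rebalance run completes after the reserve limit change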