summaryrefslogtreecommitdiffstats
path: root/tests/functional/glusterd/test_peer_probe.py
blob: 24268134315138361a00e3de6730933e733e72bc (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
#  Copyright (C) 2016-2017  Red Hat, Inc. <http://www.redhat.com>
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License along
#  with this program; if not, write to the Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_ops import (volume_create, volume_start,
                                           get_volume_list, volume_stop,
                                           volume_delete)
from glustolibs.gluster.volume_libs import (cleanup_volume)
from glustolibs.gluster.peer_ops import (peer_probe, peer_detach,
                                         peer_probe_servers,
                                         nodes_from_pool_list)
from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.exceptions import ExecutionError


@runs_on([['distributed'], ['glusterfs']])
class TestPeerProbe(GlusterBaseClass):
    """Validate 'gluster peer probe' between nodes that already host
    volumes (i.e. belong to different clusters) and exercise the volume
    lifecycle on the resulting trusted storage pool.
    """

    def setUp(self):
        """Detach all servers except the mnode so every test case starts
        from a single-node trusted storage pool.

        Raises:
            ExecutionError: if any peer detach fails.
        """
        # Performing peer detach
        for server in self.servers[1:]:
            ret, _, _ = peer_detach(self.mnode, server)
            if ret != 0:
                raise ExecutionError("Peer detach failed")
            g.log.info("Peer detach SUCCESSFUL.")
        # '.im_func' exists only on Python 2; invoking the parent method
        # through the class works on both Python 2 and Python 3.
        GlusterBaseClass.setUp(self)

    def tearDown(self):
        """
        Clean up all volumes and peer probe to re-form the cluster.

        Raises:
            ExecutionError: if volume cleanup or re-probing fails.
        """
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Best-effort detach of everything currently in the pool (the
        # return code is deliberately ignored), then probe all servers
        # back so subsequent tests see the full cluster.
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        GlusterBaseClass.tearDown(self)

    def test_peer_probe(self):
        """
        In this test case:
        1. Create Dist Volume on Node 1
        2. Create Replica Volume on Node 2
        3. Peer Probe N2 from N1 (should fail)
        4. Clean All Volumes
        5. Peer Probe N1 to N2 (should succeed)
           Peer Probe N3 to N2 (should fail)
        6. Create replica Volume on N1 and N2
        7. Peer probe from N3 to N1 (should fail)
        8. Peer probe from N1 to N3 (should succeed)
        9. Create replica Volume on N1, N2 and N3
        10. Start Volume
        11. Delete volume (should fail while volume is running)
        12. Stop volume
        13. Clean up all volumes (handled in tearDown)
        """

        # pylint: disable=too-many-statements
        # Create a distributed volume on Node1
        number_of_brick = 1
        servers_info_from_single_node = {
            self.servers[0]: self.all_servers_info[self.servers[0]]}
        self.volname = "testvol"
        bricks_list = form_bricks_list(self.servers[0], self.volname,
                                       number_of_brick, self.servers[0],
                                       servers_info_from_single_node)
        ret, _, _ = volume_create(self.servers[0], self.volname,
                                  bricks_list, True)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Create a replicate volume on Node2 without force
        number_of_brick = 2
        servers_info_from_single_node = {
            self.servers[1]: self.all_servers_info[self.servers[1]]}
        kwargs = {'replica_count': 2}
        self.volname = "new-volume"
        bricks_list = form_bricks_list(self.servers[1], self.volname,
                                       number_of_brick, self.servers[1],
                                       servers_info_from_single_node)

        # Creation of replicate volume without force should fail since
        # all replica bricks land on the same node
        ret, _, _ = volume_create(self.servers[1], self.volname,
                                  bricks_list, False, **kwargs)
        self.assertNotEqual(ret, 0, "Unexpected: Successfully created "
                            "the replicate volume on node2 without force")
        g.log.info("Failed to create the replicate volume %s as "
                   " expected without force", self.volname)

        # Create a replica volume on Node2 with force
        number_of_brick = 3
        servers_info_from_single_node = {
            self.servers[1]: self.all_servers_info[self.servers[1]]}
        kwargs = {'replica_count': 3}
        self.volname = "new-volume"
        bricks_list = form_bricks_list(self.servers[1], self.volname,
                                       number_of_brick, self.servers[1],
                                       servers_info_from_single_node)

        # Creation of replicate volume with force should succeed
        ret, _, _ = volume_create(self.servers[1], self.volname,
                                  bricks_list, True, **kwargs)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("Volume %s created", self.volname)

        # Peer probe from N1 to N2 must fail: both nodes already have
        # volumes configured, so they belong to different clusters
        ret, _, _ = peer_probe(self.servers[0], self.servers[1])
        self.assertNotEqual(
            ret, 0, "peer probe is success from %s to %s even if %s "
            "is a part of another cluster or having volumes configured"
            % (self.servers[0], self.servers[1], self.servers[1]))
        g.log.info("peer probe failed from %s to "
                   "%s as expected", self.servers[0], self.servers[1])

        # clean up all volumes
        for server in self.servers[0:2]:
            # Listing all the volumes on this server
            vol_list = get_volume_list(server)
            self.assertIsNotNone(vol_list, "Unable to get volumes list")
            g.log.info("Getting the volume list from %s", server)
            for vol in vol_list:
                g.log.info("deleting volume : %s", vol)
                ret = cleanup_volume(server, vol)
                self.assertTrue(ret, "Failed to Cleanup the Volume %s" % vol)
                g.log.info("Volume deleted successfully : %s", vol)

        # With no volumes left, peer probe from N1 to N2 should succeed
        ret, _, _ = peer_probe(self.servers[0], self.servers[1])
        self.assertEqual(ret, 0, "peer probe from %s to %s failed"
                         % (self.servers[0], self.servers[1]))
        g.log.info("peer probe is success from %s to "
                   "%s", self.servers[0], self.servers[1])

        # Peer probe from N3 to N2 should fail: N2 is already part of
        # the N1/N2 cluster
        ret, _, _ = peer_probe(self.servers[2], self.servers[1])
        self.assertNotEqual(
            ret, 0, "peer probe is success from %s to %s even if %s "
            "is a part of another cluster or having volumes configured"
            % (self.servers[2], self.servers[1], self.servers[1]))
        g.log.info("peer probe failed from %s to "
                   "%s as expected", self.servers[2], self.servers[1])

        # Create a replica volume on N1 and N2 with force
        number_of_brick = 2
        servers_info_from_two_node = {}
        for server in self.servers[0:2]:
            servers_info_from_two_node[server] = self.all_servers_info[server]
        kwargs = {'replica_count': 2}
        self.volname = "new-volume"
        bricks_list = form_bricks_list(self.servers[0], self.volname,
                                       number_of_brick, self.servers[0:2],
                                       servers_info_from_two_node)
        ret, _, _ = volume_create(self.servers[1], self.volname,
                                  bricks_list, True, **kwargs)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Peer probe from N3 to N1 should fail: N1 hosts volumes
        ret, _, _ = peer_probe(self.servers[2], self.servers[0])
        self.assertNotEqual(
            ret, 0, "peer probe is success from %s to %s even if %s is "
            "a part of another cluster or having volumes configured"
            % (self.servers[2], self.servers[0], self.servers[0]))
        g.log.info("peer probe is failed from %s to "
                   "%s as expected", self.servers[2], self.servers[0])

        # Peer probe from inside the cluster (N1 to N3) should succeed
        ret, _, _ = peer_probe(self.servers[0], self.servers[2])
        self.assertEqual(ret, 0, "peer probe from %s to %s failed"
                         % (self.servers[0], self.servers[2]))
        g.log.info("peer probe is success from %s to "
                   "%s", self.servers[0], self.servers[2])

        # Create a replica volume on N1, N2 and N3 with force
        number_of_brick = 3
        server_info_from_three_node = {}
        for server in self.servers[0:3]:
            server_info_from_three_node[server] = self.all_servers_info[server]
        kwargs = {'replica_count': 3}
        self.volname = "new-replica-volume"
        bricks_list = form_bricks_list(self.servers[2], self.volname,
                                       number_of_brick, self.servers[0:3],
                                       server_info_from_three_node)
        ret, _, _ = volume_create(self.servers[1], self.volname,
                                  bricks_list, True, **kwargs)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("Replica volume %s created successfully", self.volname)

        ret, _, _ = volume_start(self.servers[2], self.volname, True)
        self.assertEqual(ret, 0, "Failed to start the volume %s"
                         % self.volname)
        g.log.info("Volume %s start with force is success", self.volname)

        # Volume delete should fail without stopping volume
        self.assertTrue(
            volume_delete(self.servers[2], self.volname, xfail=True),
            "Unexpected Error: Volume deleted "
            "successfully without stopping volume"
        )
        g.log.info("Expected: volume delete should fail without "
                   "stopping volume: %s", self.volname)

        # Volume stop with force
        ret, _, _ = volume_stop(self.mnode, self.volname, True)
        self.assertEqual(ret, 0, "Failed to stop the volume %s"
                         % self.volname)
        g.log.info("Volume stop with force is success")