# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
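
"""
Description:
    Validate that the snapshot information under /var/lib/glusterd/snaps
    is removed from a node once that node is detached from the cluster,
    and that the node can be probed back into the cluster afterwards.
"""
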
import random
from time import sleep

from glusto.core import Glusto as g

from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.volume_ops import (volume_create,
                                           set_volume_options, volume_start)
from glustolibs.gluster.snap_ops import snap_create, snap_activate
from glustolibs.gluster.peer_ops import peer_detach_servers, peer_probe


@runs_on([['distributed'], ['glusterfs']])
class TestSnapInfoOnPeerDetachedNode(GlusterBaseClass):
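    """
    Validate snapshot information on a node that is detached from the
    cluster: the /var/lib/glusterd/snaps/<snapname> directory must exist
    on all peers while they are part of the cluster and must be absent
    on a detached peer.
    """
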
    def tearDown(self):
        # Stopping the volume and cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to cleanup the volume %s"
                                 % self.volname)
        g.log.info("Volume deleted successfully : %s", self.volname)

        # Calling GlusterBaseClass tearDown
        GlusterBaseClass.tearDown.im_func(self)

    def test_snap_info_from_detached_node(self):
        # pylint: disable=too-many-statements
        """
        Create a volume with a single brick
        Create a snapshot
        Activate the snapshot created
        Enable uss on the volume
        Validate snap info on all the nodes
        Peer detach one node
        Validate /var/lib/glusterd/snaps on the detached node
        Probe the detached node back into the cluster
        """
        # Creating volume with single brick on one node
        servers_info_single_node = {self.servers[0]:
                                    self.all_servers_info[self.servers[0]]}
        bricks_list = form_bricks_list(self.mnode, self.volname,
                                       1, self.servers[0],
                                       servers_info_single_node)
        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list)
        self.assertEqual(ret, 0, "Volume creation failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Creating a snapshot of the volume before it is started should fail
        self.snapname = "snap1"
        ret, _, _ = snap_create(
            self.mnode, self.volname, self.snapname, timestamp=False)
        self.assertNotEqual(
            ret, 0, "Snapshot created without starting the volume")
        g.log.info("Snapshot creation failed as expected")

        # Start the volume
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to start the volume %s" % self.volname)
        g.log.info("Volume start succeeded")

        # Create a snapshot of the volume after the volume is started
        ret, _, _ = snap_create(
            self.mnode, self.volname, self.snapname, timestamp=False)
        self.assertEqual(
            ret, 0, "Snapshot creation failed on the volume %s"
            % self.volname)
        g.log.info("Snapshot create succeeded")

        # Activate the snapshot created
        ret, _, err = snap_activate(self.mnode, self.snapname)
        self.assertEqual(
            ret, 0, "Snapshot activate failed with the following error %s"
            % err)
        g.log.info("Snapshot activated successfully")

        # Enable uss on the volume
        self.vol_options['features.uss'] = 'enable'
        ret = set_volume_options(self.mnode, self.volname, self.vol_options)
        self.assertTrue(ret, "gluster volume set %s features.uss "
                        "enable failed" % self.volname)
        g.log.info("gluster volume set %s features.uss enable succeeded",
                   self.volname)

        # Validate that the /var/lib/glusterd/snaps/<snapname> directory
        # exists on all the servers
        self.pathname = "/var/lib/glusterd/snaps/%s" % self.snapname
        for server in self.servers:
            conn = g.rpyc_get_connection(server)
            ret = conn.modules.os.path.isdir(self.pathname)
            self.assertTrue(ret, "%s directory doesn't exist on node %s" %
                            (self.pathname, server))
            g.log.info("%s path exists on node %s", self.pathname, server)
        g.rpyc_close_deployed_servers()

        # Peer detach one node
        self.random_node_peer_detach = random.choice(self.servers[1:])
        ret = peer_detach_servers(self.mnode,
                                  self.random_node_peer_detach,
                                  validate=True)
        self.assertTrue(ret, "Peer detach of node: %s failed" %
                        self.random_node_peer_detach)
        g.log.info("Peer detach succeeded")

        # The /var/lib/glusterd/snaps/<snapname> directory should not be
        # present on the detached node
        conn = g.rpyc_get_connection(self.random_node_peer_detach)
        ret = conn.modules.os.path.isdir(self.pathname)
        self.assertFalse(ret, "%s directory should not exist on the peer "
                         "which is detached from the cluster %s" % (
                             self.pathname, self.random_node_peer_detach))
        g.log.info("Expected: %s path doesn't exist on peer detached node %s",
                   self.pathname, self.random_node_peer_detach)
        g.rpyc_close_deployed_servers()

        # Peer probe the detached node
        ret, _, _ = peer_probe(self.mnode, self.random_node_peer_detach)
        self.assertEqual(ret, 0, "Peer probe of node: %s failed" %
                         self.random_node_peer_detach)
        g.log.info("Peer probe succeeded")

        # Validate that all peers are in connected state; retry up to
        # 10 times with a 2 second sleep between attempts
        count = 0
        while count < 10:
            sleep(2)
            ret = self.validate_peers_are_connected()
            if ret:
                break
            count += 1
        self.assertTrue(ret, "Peers are not in connected state")
        g.log.info("Peers are in connected state")