# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
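""" Description:
        Set volume options concurrently on two different volumes and
        verify that glusterd survives the load without crashing.
"""
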
import random

from glusto.core import Glusto as g

from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import cleanup_volume
from glustolibs.gluster.volume_ops import get_volume_list, volume_create
from glustolibs.gluster.lib_utils import (form_bricks_list,
                                          is_core_file_created)


@runs_on([['distributed'], ['glusterfs']])
class TestConcurrentSet(GlusterBaseClass):

    @classmethod
    def setUpClass(cls):
        cls.get_super_method(cls, 'setUpClass')()
        g.log.info("Starting %s", cls.__name__)

        # All servers must already be part of the trusted storage pool
        ret = cls.validate_peers_are_connected()
        if not ret:
            raise ExecutionError("Nodes are not in peer probe state")

    def tearDown(self):
        """
        Clean up all volumes created during the test.
        """
        vol_list = get_volume_list(self.mnode)
        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup the volume %s"
                                     % volume)
            g.log.info("Volume deleted successfully: %s", volume)

        self.get_super_method(self, 'tearDown')()

    def test_concurrent_set(self):
        # Timestamp taken at the start of the test case; used later to
        # detect core files generated while the test was running
        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()

        # Create the first volume
        self.volname = "first-vol"
        self.brick_list = form_bricks_list(self.mnode, self.volname, 3,
                                           self.servers,
                                           self.all_servers_info)
        ret = volume_create(self.mnode, self.volname,
                            self.brick_list, force=False)
        self.assertEqual(ret[0], 0, ("Unable "
                                     "to create volume %s" % self.volname))
        g.log.info("Volume created successfully: %s", self.volname)

        # Create the second volume
        self.volname = "second-vol"
        self.brick_list = form_bricks_list(self.mnode, self.volname, 3,
                                           self.servers,
                                           self.all_servers_info)
        g.log.info("Creating a volume")
        ret = volume_create(self.mnode, self.volname,
                            self.brick_list, force=False)
        self.assertEqual(ret[0], 0, ("Unable "
                                     "to create volume %s" % self.volname))
        g.log.info("Volume created successfully: %s", self.volname)
cmd1 = ("for i in `seq 1 100`; do gluster volume set first-vol "
"read-ahead on; done")
cmd2 = ("for i in `seq 1 100`; do gluster volume set second-vol "
"write-behind on; done")
proc1 = g.run_async(random.choice(self.servers), cmd1)
proc2 = g.run_async(random.choice(self.servers), cmd2)
ret1, _, _ = proc1.async_communicate()
ret2, _, _ = proc2.async_communicate()
self.assertEqual(ret1, 0, "Concurrent volume set on different volumes "
"simultaneously failed")
self.assertEqual(ret2, 0, "Concurrent volume set on different volumes "
"simultaneously failed")
g.log.info("Setting options on different volumes @ same time "
"successfully completed")

        # A core file newer than test_timestamp would indicate that
        # glusterd crashed under the concurrent load
        ret = is_core_file_created(self.servers, test_timestamp)
        if ret:
            g.log.info("No core file found; glusterd is "
                       "running successfully")
        else:
            g.log.error("Core file found; this indicates "
                        "a glusterd crash")
        self.assertTrue(ret, "glusterd service should not crash")
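
# A minimal sketch of how this test might be invoked through glusto's
# pytest integration. The config file path and test file location below
# are assumptions for illustration, not something this module defines:
#
#   glusto -c 'tests/gluster_tests_config.yml' \
#       --pytest='-v tests/functional/glusterd/test_concurrent_set.py'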