summaryrefslogtreecommitdiffstats
path: root/tests/distaf/distaf_libs/distaflibs-gluster/distaflibs/gluster/ganesha.py
blob: 7e58077eb23781957cc8c3622b9195f27e5d5b56 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
#!/usr/bin/env python
#  This file is part of DiSTAF
#  Copyright (C) 2015-2016  Red Hat, Inc. <http://www.redhat.com>
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License along
#  with this program; if not, write to the Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

"""
    Description: Library for gluster NFS-Ganesha operations.
"""

import re
import os
import time
import socket
from collections import OrderedDict
from distaf.util import tc
from distaflibs.gluster.volume_ops import get_volume_info
from distaflibs.gluster.peer_ops import (peer_probe_servers,
                                         nodes_from_pool_list)


def vol_set_nfs_disable(volname, option=True, mnode=None):
    '''Enables/Disables gluster-NFS for the given volume.
    Args:
        volname (str): Volume name.
    Kwargs:
        option (Optional[bool]): True (default) disables nfs for
            the volume, False enables it.
        mnode (Optional[str]): Node on which the command has
            to be executed. Default value is tc.servers[0].
    Returns:
        bool: True if successful, False otherwise.
    '''
    if mnode is None:
        mnode = tc.servers[0]

    if not option:
        # Re-enable gluster-NFS unconditionally.
        ret, _, _ = tc.run(mnode, "gluster volume set %s nfs.disable off "
                           "--mode=script" % volname)
        return ret == 0

    # Disabling: skip the CLI call when nfs is already disabled.
    volinfo = get_volume_info(volname, mnode)
    if volinfo[volname]['options'].get('nfs.disable') == "on":
        tc.logger.info(" nfs is already disabled for the volume %s"
                       % volname)
        return True
    ret, _, _ = tc.run(mnode, "gluster volume set %s nfs.disable on "
                       "--mode=script" % volname)
    if ret != 0:
        tc.logger.error("failed to set nfs.disable on %s" % volname)
        return False
    return True


def vol_set_ganesha(volname, option=True, mnode=None):
    '''Enables/Disables ganesha for the volume.
    Args:
        volname (str): Volume name.
    Kwargs:
        option (Optional[bool]): True (default) enables ganesha for
            the volume, False disables it.
        mnode (Optional[str]): Node on which the command has
            to be executed. Default value is tc.servers[0].
    Returns:
        bool: True if successful, False otherwise.
    '''
    if mnode is None:
        mnode = tc.servers[0]

    if not option:
        ret, _, _ = tc.run(mnode, "gluster volume set %s ganesha.enable off "
                           "--mode=script" % volname)
        return ret == 0

    # ganesha.enable requires gluster-NFS to be disabled on the volume first.
    if not vol_set_nfs_disable(volname):
        return False
    volinfo = get_volume_info(volname, mnode)
    if volinfo[volname]['options'].get('ganesha.enable') == "on":
        tc.logger.info(" ganesha is already enabled for the volume %s"
                       % volname)
        return True
    ret, _, _ = tc.run(mnode, "gluster volume set %s ganesha.enable on "
                       "--mode=script" % volname)
    if ret != 0:
        tc.logger.error("failed to set ganesha.enable on %s" % volname)
        return False
    return True


def validate_ganesha_ha_status(mnode=None):
    '''Validates Ganesha HA Status.

    The HA status is considered correct when every resource (column 1 of
    the ganesha-ha.sh --status output, with VIP/trigger suffixes stripped)
    is running on its own node (column 2).
    Kwargs:
        mnode (Optional[str]): Node on which the command has
            to be executed. Default value is tc.servers[0].
    Returns:
        bool: True if successful(HA status is correct),
            False otherwise.
    '''
    if mnode is None:
        mnode = tc.servers[0]

    resources_cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --status "
                     "| grep -v 'Online' | cut -d ' ' -f 1 | sed s/"
                     "'-cluster_ip-1'//g | sed s/'-trigger_ip-1'//g")
    ret, out, _ = tc.run(mnode, resources_cmd)
    if ret != 0:
        tc.logger.error("failed to execute the ganesha-ha status command")
        return False
    resources = [item for item in out.split("\n") if item]

    owners_cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --status "
                  "| grep -v 'Online' | cut -d ' ' -f 2")
    ret, out, _ = tc.run(mnode, owners_cmd)
    if ret != 0:
        tc.logger.error("failed to execute the ganesha-ha status command")
        return False
    owners = [item for item in out.split("\n") if item]

    if resources != owners:
        tc.logger.error("ganesha ha status is incorrect")
        return False
    tc.logger.info("ganesha ha status is correct")
    return True


def set_nfs_ganesha(option=True, mnode=None):
    '''Enables/Disables NFS-Ganesha Cluster.
    Kwargs:
        option (Optional[bool]): If True it enables the nfs-ganesha
            HA Cluster, else disables the nfs-ganesha HA Cluster.
            Default value is True.
        mnode (Optional[str]): Node on which the command has
            to be executed. Default value is tc.servers[0].
    Returns:
        bool: True if successful, False otherwise.
    '''
    if mnode is None:
        mnode = tc.servers[0]
    servers = nodes_from_pool_list()
    no_of_servers = len(servers)
    if option:
        ret, _, _ = tc.run(mnode, "gluster nfs-ganesha enable --mode=script")
        if ret != 0:
            # Typo fix: was "falied".
            tc.logger.error("nfs-ganesha enable failed")
            return False
        tc.logger.info("nfs-ganesha enable success")
        # Give the HA cluster time to come up before checking its status.
        time.sleep(45)
        # 'pcs status' is run for its log output only; its result is
        # intentionally ignored, the real check is the HA status below.
        tc.run(mnode, "pcs status")
        return validate_ganesha_ha_status(mnode)
    else:
        # Bug fix: run the disable command on mnode as documented, instead
        # of unconditionally on tc.servers[0].
        ret, _, _ = tc.run(mnode, "gluster nfs-ganesha disable "
                           "--mode=script")
        if ret != 0:
            tc.logger.error("nfs-ganesha disable failed")
            return False
        tc.logger.info("nfs-ganesha disable success")
        time.sleep(10)
        # Best-effort 'pcs status' on every node, again only for logging.
        for node in tc.servers[0:no_of_servers]:
            tc.run(node, "pcs status")
        return True


def get_host_by_name(servers=None):
    '''Get hostname of the specified servers.
    Kwargs:
        servers (Optional[str]): Get hostnames of the specified servers.
            Defaults to every node in the pool list; a single server may
            be passed as a plain string.
    Returns:
        dict: dict with 'hostname or ip_address" of the server as key and
              'hostname' of the server as value.
    '''
    if servers is None:
        servers = nodes_from_pool_list()
    if not isinstance(servers, list):
        servers = [servers]

    # Preserve the input order of the servers in the resulting mapping.
    return OrderedDict((server, socket.gethostbyaddr(server)[0])
                       for server in servers)


def create_nfs_passwordless_ssh(snodes=None, guser=None, mnode=None):
    '''Sets up the passwordless ssh between mnode and all other snodes.
    Args:
        snodes (list): List of nodes for which we require passwordless
            ssh from mnode. Defaults to an empty list (no peers).
    Kwargs:
        guser (Optional[str]): Username . Default value is root.
        mnode (Optional[str]): Node from which we require passwordless
            ssh to snodes. Default value is tc.servers[0].
    Returns:
        bool: True if successfull, False otherwise
    '''
    if snodes is None:
        # Bug fix: the old default was a shared mutable list ([]).
        snodes = []
    if guser is None:
        guser = 'root'
    if mnode is None:
        mnode = tc.servers[0]
    if not isinstance(snodes, list):
        snodes = [snodes]
    loc = "/var/lib/glusterd/nfs/"
    mconn = tc.get_connection(mnode, user='root')
    if not mconn.modules.os.path.isfile('/root/.ssh/id_rsa'):
        # No root key on mnode yet: generate (if needed) the gluster
        # secret.pem pair and install it as the root ssh identity.
        if not mconn.modules.os.path.isfile('%s/secret.pem' % loc):
            ret, _, _ = tc.run(mnode, "ssh-keygen -f /var/lib/glusterd/nfs/"
                               "secret.pem -q -N ''")
            if ret != 0:
                tc.logger.error("Unable to generate the secret pem file")
                return False
        # 0o600: keys must be readable by root only (ssh refuses otherwise).
        mconn.modules.os.chmod("%s/secret.pem" % loc, 0o600)
        mconn.modules.shutil.copyfile("%s/secret.pem" % loc,
                                      "/root/.ssh/id_rsa")
        mconn.modules.os.chmod("/root/.ssh/id_rsa", 0o600)
        tc.logger.debug("Copying the secret.pem.pub to id_rsa.pub")
        mconn.modules.shutil.copyfile("%s/secret.pem.pub" % loc,
                                      "/root/.ssh/id_rsa.pub")
    else:
        # Root key already exists: reuse it as the gluster secret.pem pair.
        mconn.modules.shutil.copyfile("/root/.ssh/id_rsa",
                                      "%s/secret.pem" % loc)
        mconn.modules.os.chmod("%s/secret.pem" % loc, 0o600)
        tc.logger.debug("Copying the id_rsa.pub to secret.pem.pub")
        mconn.modules.shutil.copyfile("/root/.ssh/id_rsa.pub",
                                      "%s/secret.pem.pub" % loc)
    # Append mnode's public key to each snode's authorized_keys.
    for snode in snodes:
        sconn = tc.get_connection(snode, user=guser)
        sfh = None
        try:
            slocal = sconn.modules.os.path.expanduser('~')
            sfh = sconn.builtin.open("%s/.ssh/authorized_keys" % slocal, "a")
            with mconn.builtin.open("/root/.ssh/id_rsa.pub", 'r') as f:
                for line in f:
                    sfh.write(line)
        except Exception:
            tc.logger.error("Unable to establish passwordless ssh %s@%s to "
                            "%s@%s" % ('root', mnode, guser, snode))
            return False
        finally:
            # Bug fix: sfh was unconditionally closed before, raising
            # NameError if expanduser/open itself failed.
            if sfh is not None:
                sfh.close()
            sconn.close()
    mconn.close()
    time.sleep(30)
    for snode in snodes:
        # Pre-seed known_hosts so later ssh/scp calls don't prompt.
        tc.run(mnode, "ssh-keyscan -H %s  >> ~/.ssh/known_hosts"
               % snode)
        if snode != mnode:
            ret, _, _ = tc.run(mnode, "scp /var/lib/glusterd/nfs/secret.*  "
                               "%s:/var/lib/glusterd/nfs/" % snode)
            if ret != 0:
                return False

    return True


def validate_ganesha_ha_failover(mnode=None, snodes=None):
    '''Validates HA failover status.

    For each HA resource (column 1 of ganesha-ha.sh --status) and the node
    currently running it (column 2): resources belonging to a killed node
    must have moved elsewhere, all other resources must stay put.
    Kwargs:
         mnode (Optional[str]): Node on which the ha status command has
            to be executed. Default value is tc.servers[0].
         snodes (Optional[str]): Node/Nodes on which ganesha process is
            Killed/stopped or Node shutdown
    Returns:
         bool: True if successfull, False otherwise.
    '''
    if mnode is None:
        mnode = tc.servers[0]
    if snodes is None:
        snodes = tc.servers[1]
    if not isinstance(snodes, list):
        snodes = [snodes]
    ha_flag = True
    ret, out, _ = tc.run(mnode, "/usr/libexec/ganesha/ganesha-ha.sh --status "
                         "| grep -v 'Online' | grep -v 'dead' | cut -d ' ' "
                         "-f 1 | sed s/'-cluster_ip-1'//g | sed s/"
                         "'-trigger_ip-1'//g")
    if ret != 0:
        # Bug fix: list1 was left undefined on failure, producing a
        # NameError below instead of a clean failure.
        tc.logger.error("failed to execute the ganesha-ha status command")
        return False
    list1 = [item for item in out.split("\n") if item]
    ret, out, _ = tc.run(mnode, "/usr/libexec/ganesha/ganesha-ha.sh --status "
                         "| grep -v 'Online' | grep -v 'dead' | cut -d ' ' "
                         "-f 2 | sed s/'-cluster_ip-1'//g | sed s/"
                         "'-trigger_ip-1'//g")
    if ret != 0:
        tc.logger.error("failed to execute the ganesha-ha status command")
        return False
    list2 = [item for item in out.split("\n") if item]
    server_hostname_dict = get_host_by_name()
    snodes_hostnames = [server_hostname_dict[snode] for snode in snodes]
    for val1, val2 in zip(list1, list2):
        if val1 in snodes_hostnames:
            # Resource belonged to a killed node: it must have failed over.
            if val1 == val2:
                tc.logger.error("Failover dint happen, wrong failover status "
                                "-> %s %s" % (val1, val2))
                ha_flag = False
            else:
                tc.logger.info("%s successfully failed over on %s"
                               % (val1, val2))
        elif val1 != val2:
            # Healthy node's resource must not have moved.
            tc.logger.error("Failover not required, wrong failover status "
                            "-> %s %s" % (val1, val2))
            ha_flag = False

    return ha_flag


def get_ganesha_ha_failover_nodes(mnode=None, snodes=None):
    '''Returns HA failover status and a mapping of failed-over resources.
    Kwargs:
         mnode (Optional[str]): Node on which the ha status command has
            to be executed. Default value is tc.servers[0].
         snodes (Optional[str]): Node/Nodes on which ganesha process
            is Killed/stopped or Node shutdown
    Returns:
         bool,dict: If successfull True,dict
            False otherwise
    '''
    if mnode is None:
        mnode = tc.servers[0]
    if snodes is None:
        snodes = tc.servers[1]
    if not isinstance(snodes, list):
        snodes = [snodes]
    ha_flag = True
    tnode = OrderedDict()
    ret, out, _ = tc.run(mnode, "/usr/libexec/ganesha/ganesha-ha.sh --status "
                         "| grep -v 'Online' | grep -v 'dead' | cut -d ' ' "
                         "-f 1 | sed s/'-cluster_ip-1'//g | sed s/"
                         "'-trigger_ip-1'//g")
    if ret != 0:
        # Bug fix: list1 was left undefined on failure, producing a
        # NameError below instead of a clean failure.
        tc.logger.error("failed to execute the ganesha-ha status command")
        return (False, tnode)
    list1 = [item for item in out.split("\n") if item]
    ret, out, _ = tc.run(mnode, "/usr/libexec/ganesha/ganesha-ha.sh --status "
                         "| grep -v 'Online' | grep -v 'dead' | cut -d ' ' "
                         "-f 2 | sed s/'-cluster_ip-1'//g | sed s/"
                         "'-trigger_ip-1'//g")
    if ret != 0:
        tc.logger.error("failed to execute the ganesha-ha status command")
        return (False, tnode)
    list2 = [item for item in out.split("\n") if item]
    server_hostname_dict = get_host_by_name()
    snodes_hostnames = [server_hostname_dict[snode] for snode in snodes]
    for val1, val2 in zip(list1, list2):
        if val1 in snodes_hostnames:
            if val1 == val2:
                tc.logger.error("Failover dint happen, wrong failover status "
                                "-> %s %s" % (val1, val2))
                ha_flag = False
            else:
                # NOTE(review): val1/val2 are already hostnames (compared
                # against snodes_hostnames above) while the dict is keyed by
                # server address — this lookup only works when addresses and
                # hostnames coincide; confirm against callers.
                tnode[server_hostname_dict[val1]] = server_hostname_dict[val2]
                tc.logger.info("%s successfully failed over on %s"
                               % (val1, val2))
        elif val1 != val2:
            tc.logger.error("Failover not required, wrong failover status "
                            "-> %s %s" % (val1, val2))
            ha_flag = False

    return (ha_flag, tnode)


def update_ganesha_ha_conf(no_of_servers=None):
    '''Updates the ganesha-ha.conf file, with VIPs and hostnames.
    Kwargs:
        no_of_servers (Optional[int]): The number of nodes on which we have
            to modify the ganesha-ha.conf file. Default it takes
            the number of servers from the pool list.
    Returns:
        bool: True if successfull, False otherwise.
    '''
    if no_of_servers is None:
        servers = nodes_from_pool_list()
        no_of_servers = len(servers)
    else:
        servers = tc.servers[0:no_of_servers]
    server_hostname_dict = get_host_by_name(servers)
    hostnames = list(server_hostname_dict.values())
    hosts = ','.join(hostnames)
    file_src_path = "/etc/ganesha/ganesha-ha.conf.sample"
    file_dest_path = "/etc/ganesha/ganesha-ha.conf"
    # tc.run returns (retcode, stdout, stderr); stdout carries the hostname.
    ha_server = tc.run(tc.servers[0], "hostname")
    conn = tc.get_connection(tc.servers[0], "root")
    if not conn.modules.os.path.isfile(file_src_path):
        # Bug fix: a missing sample file previously fell through to a
        # NameError on FH; fail explicitly instead.
        tc.logger.error("%s not found, cannot update ganesha-ha.conf"
                        % file_src_path)
        conn.close()
        return False
    tc.logger.info("%s file available and should be updated as "
                   "ganesha-ha.conf" % file_src_path)
    try:
        conn.modules.shutil.copy(file_src_path, file_dest_path)
        FH = conn.builtin.open(file_dest_path, "r+")
    except IOError as e:
        tc.logger.error(e)
        conn.close()
        return False
    lines = FH.readlines()
    FH.seek(0)
    FH.truncate()
    # Rewrite the HA identity/server/node lines and drop sample VIP lines.
    for i in range(len(lines)):
        if re.search("HA_NAME", lines[i]) is not None:
            lines[i] = re.sub(r'^HA_NAME.*', "HA_NAME=\"G"+str(time.time()) +
                              "\"", lines[i])
        if re.search("HA_VOL_SERVER", lines[i]) is not None:
            lines[i] = re.sub(r'^HA_VOL_SERVER.*', "HA_VOL_SERVER=\"" +
                              ha_server[1].strip()+"\"", lines[i])
        if re.search("HA_CLUSTER_NODES", lines[i]) is not None:
            lines[i] = re.sub(r'^HA_CLUSTER_NODES.*', "HA_CLUSTER_NODES=\"" +
                              hosts+"\"", lines[i])
        if re.search("VIP_", lines[i]) is not None:
            lines[i] = re.sub(r'.*VIP_.*\n', "", lines[i])
    vips = (tc.global_config["gluster"]["cluster_config"]
            ["nfs_ganesha"]["vips"])
    for i in range(no_of_servers):
        # Bug fix: 'lines += <str>' extended the list one character at a
        # time (the joined output was the same only by accident); append
        # whole VIP lines instead.
        lines.append("VIP_%s=\"%s\"\n" % (hostnames[i], vips[i]))
    new_conf = ''.join(lines)
    FH.write(new_conf)
    FH.close()
    conn.close()
    # create a local copy of this ha.conf file
    with open("/tmp/ganesha-ha.conf", "w") as f:
        f.write(new_conf)
    # copy this ha.conf file to all the other nodes
    for node in tc.servers[1:no_of_servers]:
        # NOTE(review): upload failures were ignored by the original too;
        # kept best-effort here.
        tc.upload(node, "/tmp/ganesha-ha.conf", file_dest_path)

    return True


def cluster_auth_setup(no_of_servers=None):
    '''Sets the hacluster password, starts pcsd service and runs
       pcs cluster auth command.
    Kwargs:
        no_of_servers (Optional[int]): The number of nodes on which we have
            to setup the HA cluster. Default it takes the number
            of servers from the pool list.
    Returns:
        bool: True if successfull, False otherwise.
    '''
    if no_of_servers is None:
        servers = nodes_from_pool_list()
        no_of_servers = len(servers)
    else:
        servers = tc.servers[0:no_of_servers]
    result = True
    for node in tc.servers[0:no_of_servers]:
        ret, _, _ = tc.run(node, "echo hacluster | passwd --stdin hacluster")
        if ret != 0:
            tc.logger.error("unable to set password for hacluster on %s"
                            % node)
            return False
        ret, _, _ = tc.run(node, "service pcsd start")
        if ret != 0:
            # Bug fix: this path called tc.looger (AttributeError) instead
            # of tc.logger.
            tc.logger.error("service pcsd start command failed on %s"
                            % node)
            return False
    server_hostname_dict = get_host_by_name(servers)
    # The hostname list is identical for every node; build it once instead
    # of re-concatenating it inside the loop.
    val = " ".join(server_hostname_dict.values()) + " "
    for node in tc.servers[0:no_of_servers]:
        ret, _, _ = tc.run(node, "pcs cluster auth %s -u hacluster -p "
                           "hacluster" % val)
        if ret != 0:
            tc.logger.error("pcs cluster auth command failed on %s" % node)
            result = False

    return result


def setup_nfs_ganesha(no_of_servers=None):
    '''Setup NFS-Ganesha HA cluster.

    Steps: peer probe, passwordless ssh, ganesha-ha.conf update, pcs
    cluster auth, shared-storage volume creation, nfs-ganesha enable.
    Kwargs:
        no_of_servers (Optional[int]): The number of nodes on which we have
            to setup the HA cluster. Default it takes the number
            of servers from the pool list.
    Returns:
        bool: True if successfull, False otherwise.
    '''
    # Skip everything when a previous call already completed the setup.
    if tc.global_flag.get('setup_nfs_ganesha'):
        tc.logger.debug("The setup nfs-ganesha is already setup, returning...")
        return True
    if no_of_servers is None:
        servers = tc.servers
        no_of_servers = len(servers)
    servers = tc.servers[0:no_of_servers]
    no_of_servers = int(no_of_servers)
    # Step 1: Peer probe
    ret = peer_probe_servers(tc.servers[1:no_of_servers], mnode=tc.servers[0])
    if not ret:
        return False
    # Step 2: Passwordless ssh for nfs
    ret = create_nfs_passwordless_ssh(snodes=tc.servers[0:no_of_servers],
                                      mnode=tc.servers[0])
    if ret:
        tc.logger.info("passwordless ssh between nodes successfull")
    else:
        tc.logger.error("passwordless ssh between nodes unsuccessfull")
        return False
    # Step 3: Update ganesha-ha.conf file
    ret = update_ganesha_ha_conf(no_of_servers)
    if ret:
        # Typo fix in log messages: was "succeessfully".
        tc.logger.info("ganesha-ha.conf files successfully updated on all "
                       "the nodes")
    else:
        tc.logger.error("ganesha-ha.conf files not successfully updated on "
                        "all the nodes")
        return False
    # Step 4: Cluster setup
    ret = cluster_auth_setup(no_of_servers)
    if ret:
        tc.logger.info("successfull cluster setup")
    else:
        tc.logger.error("unsuccessfull cluster setup")
        return False
    # Step 5: Using CLI to create shared volume
    ret, _, _ = tc.run(tc.servers[0], "gluster v list | grep "
                       "'gluster_shared_storage'")
    if ret != 0:
        ret, _, _ = tc.run(tc.servers[0], "gluster volume set all "
                           "cluster.enable-shared-storage enable")
        if ret != 0:
            tc.logger.error("shared volume creation unsuccessfull")
            return False
        tc.logger.info("shared volume creation successfull")
        time.sleep(10)
    else:
        tc.logger.info("shared volume already exists")
    # Allow the shared volume to settle/mount on all nodes.
    time.sleep(60)
    # Step 6: Enable NFS-Ganesha
    ret = set_nfs_ganesha(True)
    if ret:
        tc.logger.info("gluster nfs-ganesha enable success")
    else:
        tc.logger.error("gluster nfs-ganesha enable failed")
        return False
    # Setting globalflag to True
    tc.global_flag["setup_nfs_ganesha"] = True

    return True


def teardown_nfs_ganesha_setup(mnode=None):
    '''Teardowns the NFS-Ganesha HA setup.
    Kwargs:
        mnode (Optional[str]): Node on which the command has
            to be executed. Default value is tc.servers[0].
    Returns:
        bool: True if successful, False otherwise.
    '''
    if mnode is None:
        mnode = tc.servers[0]
    # Step 1: Disable NFS-Ganesha
    if not set_nfs_ganesha(False):
        tc.logger.error("gluster nfs-ganesha disable failed")
        return False
    tc.logger.info("gluster nfs-ganesha disable success")
    # Step 2: Using CLI to delete the shared volume
    ret, _, _ = tc.run(mnode, "gluster volume set all "
                       "cluster.enable-shared-storage disable --mode=script")
    if ret != 0:
        tc.logger.error("shared volume deletion unsuccessfull")
        return False
    tc.logger.info("shared volume deletion successfull")
    # Setting globalflag to False
    tc.global_flag["setup_nfs_ganesha"] = False

    return True