#!/bin/bash
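#
# Post 'volume set' hook (extras/hook-scripts/set/post/
# S32gluster_enable_shared_storage.sh): creates, mounts, and tears down
# the gluster_shared_storage volume when the cluster.enable-shared-storage
# option is toggled.
#
# A sketch of the arguments this script parses below (the exact values are
# supplied by glusterd and may vary by version; the hostname shown is
# hypothetical):
#   $3 -> "cluster.enable-shared-storage=enable"   (or "...=disable")
#   $4 -> "is_originator=1,local_node_hostname=node1.example.com"
#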

key=$(echo "$3" | cut -d '=' -f 1)
val=$(echo "$3" | cut -d '=' -f 2)
# '-eq' is an integer comparison; string keys must be compared with '!='
if [ "$key" != "enable-shared-storage" ] && [ "$key" != "cluster.enable-shared-storage" ]; then
    exit;
fi
if [ "$val" != 'enable' ]; then
    if [ "$val" != 'disable' ]; then
        exit;
    fi
fi

option=$val

key_val_pair1=$(echo "$4" | cut -d ',' -f 1)
key_val_pair2=$(echo "$4" | cut -d ',' -f 2)

key=$(echo "$key_val_pair1" | cut -d '=' -f 1)
val=$(echo "$key_val_pair1" | cut -d '=' -f 2)
if [ "$key" != "is_originator" ]; then
    exit;
fi
is_originator=$val;

key=$(echo "$key_val_pair2" | cut -d '=' -f 1)
val=$(echo "$key_val_pair2" | cut -d '=' -f 2)
if [ "$key" != "local_node_hostname" ]; then
    exit;
fi
local_node_hostname=$val;

# Read gluster peer status to find the peers
# which are in 'Peer in Cluster' mode and
# are connected.
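#
# A hypothetical peer entry as this loop expects it (output format assumed
# from the parsing below; only the Hostname and State lines are consumed):
#   Hostname: node2.example.com
#   Uuid: 3c5bd8a4-...
#   State: Peer in Cluster (Connected)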

number_of_connected_peers=0
while read -r line
do
    # Already got two connected peers. Including the current node,
    # we have three peers, which is enough to create the shared
    # storage volume with replica 3.
    if [ "$number_of_connected_peers" == "2" ]; then
        break;
    fi

    key=$(echo "$line" | cut -d ':' -f 1)
    if [ "$key" == "Hostname" ]; then
        hostname=$(echo "$line" | cut -d ':' -f 2 | xargs)
    fi

    if [ "$key" == "State" ]; then
        peer_state=$(echo "$line" | cut -d ':' -f 2 | cut -d '(' -f 1 | xargs)
        conn_state=$(echo "$line" | cut -d '(' -f 2 | cut -d ')' -f 1 | xargs)

        if [ "$peer_state" == "Peer in Cluster" ]; then
            if [ "$conn_state" == "Connected" ]; then
                ((number_of_connected_peers++))
                connected_peer[$number_of_connected_peers]=$hostname
            fi
        fi
    fi

done < <(gluster peer status)

# Include current node in connected peer list
((number_of_connected_peers++))
connected_peer[$number_of_connected_peers]=$local_node_hostname
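# At this point the connected_peer array (1-indexed) holds, e.g. with two
# reachable peers and hypothetical hostnames:
#   connected_peer[1]=node2.example.com
#   connected_peer[2]=node3.example.com
#   connected_peer[3]=$local_node_hostname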

# Form the 'volume create' command
create_cmd="gluster --mode=script --wignore volume create \
            gluster_shared_storage replica $number_of_connected_peers"

# Append one brick path per connected peer to the command
for i in "${connected_peer[@]}"
do
    create_cmd="$create_cmd $i:$GLUSTERD_WORKDIR/ss_brick"
done
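# With three peers the assembled command would look like this (hypothetical
# hostnames; $GLUSTERD_WORKDIR is typically /var/lib/glusterd):
#   gluster --mode=script --wignore volume create gluster_shared_storage \
#       replica 3 node2:/var/lib/glusterd/ss_brick \
#       node3:/var/lib/glusterd/ss_brick node1:/var/lib/glusterd/ss_brick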

if [ "$option" == "disable" ]; then
    # Unmount the volume on all the nodes
    umount /var/run/gluster/shared_storage
    cat /etc/fstab  | grep -v "gluster_shared_storage /var/run/gluster/shared_storage/" > /var/run/gluster/fstab.tmp
    mv /var/run/gluster/fstab.tmp /etc/fstab
fi

if [ "$is_originator" == 1 ]; then
    if [ "$option" == "enable" ]; then
        # Create and start the volume
        $create_cmd
        gluster --mode=script --wignore volume start gluster_shared_storage
    fi

    if [ "$option" == "disable" ]; then
        # Stop and delete the volume
        gluster --mode=script --wignore volume stop gluster_shared_storage
        gluster --mode=script --wignore volume delete gluster_shared_storage
    fi
fi

function check_volume_status()
{
    status=$(gluster volume info gluster_shared_storage | grep Status | cut -d ':' -f 2 | xargs)
    echo "$status"
}
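# check_volume_status extracts the Status field from 'gluster volume info',
# e.g. (output format assumed):
#   Volume Name: gluster_shared_storage
#   Status: Started
# yields "Started".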

mount_cmd="mount -t glusterfs "$local_node_hostname":/gluster_shared_storage \
           /var/run/gluster/shared_storage"

if [ "$option" == "enable" ]; then
    retry=0;
    # Wait for volume to start before mounting
    status=$(check_volume_status)
    while [ "$status" != "Started" ]; do
        sleep 5;
        ((retry++))
        if [ "$retry" == 3 ]; then
            break;
        fi
        status=$(check_volume_status)
    done
    # Mount the volume on all the nodes
    umount /var/run/gluster/shared_storage
    mkdir -p /var/run/gluster/shared_storage
    $mount_cmd
    cp /etc/fstab /var/run/gluster/fstab.tmp
    echo "$local_node_hostname:/gluster_shared_storage /var/run/gluster/shared_storage/ glusterfs defaults        0 0" >> /var/run/gluster/fstab.tmp
    mv /var/run/gluster/fstab.tmp /etc/fstab
fi