author     Avra Sengupta <asengupt@redhat.com>           2015-05-14 15:00:59 +0530
committer  Krishnan Parthasarathi <kparthas@redhat.com>  2015-06-04 02:37:19 -0700
commit     402589f58cbb350dfedafa83e133664855ed37b2 (patch)
tree       0c81042fa7cfc15a636f4fc353c6f385d213e062 /extras
parent     c2898f040937492c69a603ab3605cbd441e1e1f3 (diff)
glusterd/shared_storage: Provide a volume set option to create and mount the shared storage
Introducing a global volume set option (cluster.enable-shared-storage) which helps create and set up the shared storage meta volume.

gluster volume set all cluster.enable-shared-storage enable

On enabling this option, the system analyzes the number of peers in the cluster which are currently connected, and chooses three such peers (including the node the command is issued from). From these peers a volume (gluster_shared_storage) is created. Depending on the number of peers available, the volume is either a replica 3 volume (if there are 3 connected peers) or a replica 2 volume (if there are 2 connected peers). "/var/run/gluster/ss_brick" serves as the brick path on each node for the shared storage volume. We also mount the shared storage at "/var/run/gluster/shared_storage" on all the nodes in the cluster as part of enabling this option. If there is only one node in the cluster, or only one node is up, the command will fail.

Once the volume is created and mounted, maintenance of the volume, such as adding or removing bricks, is expected to be the onus of the user.

On disabling the option, we provide the user a warning, and on affirmation from the user we stop the shared storage volume and unmount it from all the nodes in the cluster.

gluster volume set all cluster.enable-shared-storage disable

Change-Id: Idd92d67b93f444244f99ede9f634ef18d2945dbc
BUG: 1222013
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: http://review.gluster.org/10793
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-by: Rajesh Joseph <rjoseph@redhat.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
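A quick sanity check after enabling can look like the following (these are ordinary GlusterFS CLI and shell commands, not part of this patch; output is omitted):

    # After 'gluster volume set all cluster.enable-shared-storage enable':
    gluster volume info gluster_shared_storage       # replica 2 or replica 3, Status: Started
    mount | grep /var/run/gluster/shared_storage     # mounted on every node

    # Disabling warns and asks for confirmation, then stops and unmounts the volume:
    gluster volume set all cluster.enable-shared-storage disable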
Diffstat (limited to 'extras')
-rw-r--r--   extras/hook-scripts/set/post/Makefile.am                           |   2
-rwxr-xr-x   extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh   | 124
2 files changed, 125 insertions(+), 1 deletion(-)
diff --git a/extras/hook-scripts/set/post/Makefile.am b/extras/hook-scripts/set/post/Makefile.am
index 3ec25d94134..99dfaa3eafb 100644
--- a/extras/hook-scripts/set/post/Makefile.am
+++ b/extras/hook-scripts/set/post/Makefile.am
@@ -1 +1 @@
-EXTRA_DIST = S30samba-set.sh S31ganesha-set.sh
+EXTRA_DIST = S30samba-set.sh S31ganesha-set.sh S32gluster_enable_shared_storage.sh
diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
new file mode 100755
index 00000000000..28fa0e53316
--- /dev/null
+++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+
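+# Arguments, as parsed below: $3 is the volume-set key=value pair
+# ("cluster.enable-shared-storage=<enable|disable>"); $4 is a comma-separated
+# pair "is_originator=<0|1>,local_node_hostname=<hostname>", where
+# is_originator is 1 on the node the command was issued from.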
+key=`echo $3 | cut -d '=' -f 1`
+val=`echo $3 | cut -d '=' -f 2`
+if [ "$key" != "cluster.enable-shared-storage" ]; then
+    exit;
+fi
+if [ "$val" != 'enable' ]; then
+    if [ "$val" != 'disable' ]; then
+        exit;
+    fi
+fi
+
+option=$val
+
+key_val_pair1=`echo $4 | cut -d ',' -f 1`
+key_val_pair2=`echo $4 | cut -d ',' -f 2`
+
+key=`echo $key_val_pair1 | cut -d '=' -f 1`
+val=`echo $key_val_pair1 | cut -d '=' -f 2`
+if [ "$key" != "is_originator" ]; then
+    exit;
+fi
+is_originator=$val;
+
+key=`echo $key_val_pair2 | cut -d '=' -f 1`
+val=`echo $key_val_pair2 | cut -d '=' -f 2`
+if [ "$key" != "local_node_hostname" ]; then
+    exit;
+fi
+local_node_hostname=$val;
+
+# Read gluster peer status to find the peers
+# which are in 'Peer in Cluster' mode and
+# are connected.
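+#
+# Each peer appears in that output as a block like the following
+# (hostname illustrative):
+#   Hostname: server2.example.com
+#   Uuid: <peer-uuid>
+#   State: Peer in Cluster (Connected)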
+
+number_of_connected_peers=0
+while read -r line
+do
+    # Already got two connected peers. Including the current node
+    # we have 3 peers which is enough to create a shared storage
+    # with replica 3
+    if [ "$number_of_connected_peers" == "2" ]; then
+        break;
+    fi
+
+    key=`echo $line | cut -d ':' -f 1`
+    if [ "$key" == "Hostname" ]; then
+        hostname=`echo $line | cut -d ':' -f 2 | xargs`
+    fi
+
+    if [ "$key" == "State" ]; then
+        peer_state=`echo $line | cut -d ':' -f 2 | cut -d '(' -f 1 | xargs`
+        conn_state=`echo $line | cut -d '(' -f 2 | cut -d ')' -f 1 | xargs`
+
+        if [ "$peer_state" == "Peer in Cluster" ]; then
+            if [ "$conn_state" == "Connected" ]; then
+                ((number_of_connected_peers++))
+                connected_peer[$number_of_connected_peers]=$hostname
+            fi
+        fi
+    fi
+
+done < <(gluster peer status)
+
+# Include current node in connected peer list
+((number_of_connected_peers++))
+connected_peer[$number_of_connected_peers]=$local_node_hostname
+
+# forming the create vol command
+create_cmd="gluster --mode=script --wignore volume create \
+            gluster_shared_storage replica $number_of_connected_peers"
+
+# Adding the brick names in the command
+for i in "${connected_peer[@]}"
+do
+    create_cmd=$create_cmd" "$i:/var/run/gluster/ss_brick
+done
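+
+# With two connected peers plus the local node, create_cmd expands to
+# something like (hostnames illustrative):
+#   gluster --mode=script --wignore volume create gluster_shared_storage \
+#       replica 3 server1:/var/run/gluster/ss_brick \
+#       server2:/var/run/gluster/ss_brick server3:/var/run/gluster/ss_brick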
+
+if [ "$option" == "disable" ]; then
+    # Unmount the volume on all the nodes
+    umount /var/run/gluster/shared_storage
+fi
+
+if [ "$is_originator" == 1 ]; then
+ if [ "$option" == "enable" ]; then
+ # Create and start the volume
+ $create_cmd
+ gluster --mode=script --wignore volume start gluster_shared_storage
+ fi
+
+ if [ "$option" == "disable" ]; then
+ # Stop and delete the volume
+ gluster --mode=script --wignore volume stop gluster_shared_storage
+ gluster --mode=script --wignore volume delete gluster_shared_storage
+ fi
+fi
+
+function check_volume_status()
+{
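+    # 'gluster volume info <vol>' prints a line of the form "Status: Started";
+    # extract just the value.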
+    status=`gluster volume info gluster_shared_storage | grep Status | cut -d ':' -f 2 | xargs`
+    echo $status
+}
+
+mount_cmd="mount -t glusterfs "$local_node_hostname":/gluster_shared_storage \
+           /var/run/gluster/shared_storage"
+
+if [ "$option" == "enable" ]; then
+    retry=0;
+    # Wait for volume to start before mounting
+    status=$(check_volume_status)
+    while [ "$status" != "Started" ]; do
+        sleep 5;
+        ((retry++))
+        if [ "$retry" == 3 ]; then
+            break;
+        fi
+        status=$(check_volume_status);
+    done
+    # Mount the volume on all the nodes
+    umount /var/run/gluster/shared_storage
+    mkdir -p /var/run/gluster/shared_storage
+    $mount_cmd
+fi