From 15ba102feba723b7707ab5efbd614e3da2c59555 Mon Sep 17 00:00:00 2001
From: Sanju Rakonde
Date: Mon, 22 Apr 2019 18:10:05 +0530
Subject: tests: add .t file to increase cli code coverage

updates: bz#1693692
Change-Id: I848e622d7b8562e864f0e208aafdc21d9cb757d3
Signed-off-by: Sanju Rakonde
---
 tests/basic/volume-status.t                        | 20 +++++++
 tests/bugs/cli/bug-983317-volume-get.t             | 13 ++++-
 .../line-coverage/cli-peer-and-volume-operations.t | 65 ++++++++++++++++++++++
 3 files changed, 97 insertions(+), 1 deletion(-)
 create mode 100644 tests/line-coverage/cli-peer-and-volume-operations.t

diff --git a/tests/basic/volume-status.t b/tests/basic/volume-status.t
index a79e202d4ab..01d7ebf6c07 100644
--- a/tests/basic/volume-status.t
+++ b/tests/basic/volume-status.t
@@ -34,6 +34,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status
 
 ## Mount FUSE
 TEST $GFS -s $H0 --volfile-id $V0 $M0;
+TEST touch $M0/file{1..20}
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "6" gluster_fd_status
 
@@ -57,6 +58,8 @@ function test_nfs_cmds () {
     for cmd in ${nfs_cmds[@]}; do
         $CLI volume status $V0 nfs $cmd
         (( ret += $? ))
+        $CLI volume status $V0 nfs $cmd --xml
+        (( ret += $? ))
     done
     return $ret
 }
@@ -67,6 +70,8 @@ function test_shd_cmds () {
     for cmd in ${shd_cmds[@]}; do
         $CLI volume status $V0 shd $cmd
         (( ret += $? ))
+        $CLI volume status $V0 shd $cmd --xml
+        (( ret += $? ))
     done
     return $ret
 }
@@ -78,14 +83,29 @@ function test_brick_cmds () {
         for i in {1..2}; do
             $CLI volume status $V0 $H0:$B0/${V0}$i $cmd
             (( ret += $? ))
+            $CLI volume status $V0 $H0:$B0/${V0}$i $cmd --xml
+            (( ret += $? ))
         done
     done
     return $ret
 }
 
+function test_status_cmds () {
+    local ret=0
+    declare -a cmds=("detail" "clients" "mem" "inode" "fd" "callpool" "tasks" "client-list")
+    for cmd in ${cmds[@]}; do
+        $CLI volume status $V0 $cmd
+        (( ret += $? ))
+        $CLI volume status $V0 $cmd --xml
+        (( ret += $? ))
+    done
+    return $ret
+}
+
 TEST test_shd_cmds;
 TEST test_nfs_cmds;
 TEST test_brick_cmds;
+TEST test_status_cmds;
 
 
 ## Before killing daemon to avoid deadlocks

diff --git a/tests/bugs/cli/bug-983317-volume-get.t b/tests/bugs/cli/bug-983317-volume-get.t
index 8f09d588565..c793bbc9f0c 100644
--- a/tests/bugs/cli/bug-983317-volume-get.t
+++ b/tests/bugs/cli/bug-983317-volume-get.t
@@ -7,7 +7,8 @@ cleanup;
 
 TEST glusterd
 TEST pidof glusterd
-TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+EXPECT 'Created' volinfo_field $V0 'Status';
 
 # Set a volume option
 TEST $CLI volume set $V0 open-behind on
@@ -32,3 +33,13 @@ EXPECT '80' volume_get_field $V0 'server-quorum-ratio'
 
 # Check user.* options can also be retrived using volume get
 EXPECT 'dummy' volume_get_field $V0 'user.metadata'
+
+TEST $CLI volume set all brick-multiplex enable
+EXPECT 'enable' volume_get_field $V0 'brick-multiplex'
+
+TEST $CLI volume set all brick-multiplex disable
+EXPECT 'disable' volume_get_field $V0 'brick-multiplex'
+
+# setting a cluster-level option on a single volume should fail
+TEST ! $CLI volume set $V0 brick-multiplex enable
+
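
A note on the pattern the hunks above extend: each "gluster volume status" subcommand is invoked twice, once with plain output and once with --xml, and the exit codes are summed so that any single failure fails the enclosing TEST. The same idea as a standalone sketch, outside the test harness; it assumes a running glusterd and an already started volume, and the volume name "myvol" is a placeholder, not something from the patch:

    #!/bin/bash
    # Sketch only: drive every "gluster volume status" subcommand through
    # both the plain and the XML output code paths, accumulating failures.
    VOL=myvol   # placeholder volume name
    ret=0
    for cmd in detail clients mem inode fd callpool tasks client-list; do
        gluster volume status $VOL $cmd          # plain-text output path
        (( ret += $? ))
        gluster volume status $VOL $cmd --xml    # XML output path
        (( ret += $? ))
    done
    exit $ret
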
diff --git a/tests/line-coverage/cli-peer-and-volume-operations.t b/tests/line-coverage/cli-peer-and-volume-operations.t
new file mode 100644
index 00000000000..81a849885a3
--- /dev/null
+++ b/tests/line-coverage/cli-peer-and-volume-operations.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../volume.rc
+
+function peer_count {
+    eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup
+
+TEST launch_cluster 3
+
+## basic peer commands
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+# probe an unreachable node
+TEST kill_glusterd 3
+TEST ! $CLI_1 peer probe $H3
+
+# detach a node which is not part of the cluster
+TEST ! $CLI_1 peer detach $H3
+TEST ! $CLI_1 peer detach $H3 force
+
+TEST start_glusterd 3
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
+
+# probe a node which is already part of the cluster
+TEST $CLI_1 peer probe $H3
+
+# probe an invalid address
+TEST ! $CLI_1 peer probe 1024.1024.1024.1024
+
+TEST $CLI_1 pool list
+
+## all help commands
+TEST $CLI_1 global help
+TEST $CLI_1 help
+
+TEST $CLI_1 peer help
+TEST $CLI_1 volume help
+TEST $CLI_1 volume bitrot help
+TEST $CLI_1 volume quota help
+TEST $CLI_1 snapshot help
+
+## volume operations
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+# creating a volume with an already existing volume name must fail
+TEST ! $CLI_1 volume create $V0 $H1:$B1/$V1 $H2:$B2/$V1
+TEST $CLI_1 volume start $V0
+
+# Mount the volume and create files
+TEST glusterfs -s $H1 --volfile-id $V0 $M1
+TEST touch $M1/file{1..100}
+
+# fails because $V0 is a plain distribute volume, which is not shd-compatible
+TEST ! $CLI_1 volume status $V0 shd
+
+cleanup
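
One subtlety in the new file: launch_cluster 3 (from cluster.rc) defines one CLI variable per simulated node ($CLI_1, $CLI_2, $CLI_3), so peer_count has to assemble the variable name at runtime; that is what the "eval \$CLI_$1 peer status" indirection does. A self-contained illustration of the trick, where the CLI_n values are invented stand-ins rather than what cluster.rc actually sets:

    #!/bin/bash
    # Sketch of the indirect expansion used by peer_count. The CLI_n values
    # here are hypothetical stand-ins; cluster.rc points them at real gluster
    # CLI invocations against each node's glusterd.
    CLI_1="echo node1:"
    CLI_2="echo node2:"

    run_on_node () {
        # "\$CLI_$1" survives the first expansion as the literal string
        # "$CLI_1" (or "$CLI_2"), which eval then expands and executes.
        eval \$CLI_$1 peer status
    }

    run_on_node 1    # prints: node1: peer status
    run_on_node 2    # prints: node2: peer status
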
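To try the new test locally: the .t files in this tree are TAP scripts, normally run as root from a glusterfs source checkout, either directly with prove or through the repository's run-tests.sh wrapper. Both commands below assume that standard layout; adjust paths to your checkout:

    # run just this test, verbose, showing failures
    sudo prove -vf tests/line-coverage/cli-peer-and-volume-operations.t

    # or via the project harness
    sudo ./run-tests.sh tests/line-coverage/cli-peer-and-volume-operations.t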