From 64954eb3c58f4ef077e54e8a3726fd2d27419b12 Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Fri, 26 Dec 2014 12:57:48 +0100 Subject: tests: move all test-cases into component subdirectories There are around 300 regression tests, 250 being in tests/bugs. Running partial set of tests/bugs is not easy because this is a flat directory with almost all tests inside. It would be valuable to make partial test/bugs easier, and allow the use of mulitple build hosts for a single commit, each running a subset of the tests for a quicker result. Additional changes made: - correct the include path for *.rc shell libraries and *.py utils - make the testcases pass checkpatch - arequal-checksum in afr/self-heal.t was never executed, now it is - include.rc now complains loudly if it fails to find env.rc Change-Id: I26ffd067e9853d3be1fd63b2f37d8aa0fd1b4fea BUG: 1178685 Reported-by: Emmanuel Dreyfus Reported-by: Atin Mukherjee URL: http://www.gluster.org/pipermail/gluster-devel/2014-December/043414.html Signed-off-by: Niels de Vos Reviewed-on: http://review.gluster.org/9353 Reviewed-by: Kaleb KEITHLEY Reviewed-by: Emmanuel Dreyfus Tested-by: Gluster Build System Reviewed-by: Vijay Bellur --- tests/bugs/859927/repl.t | 69 ------- tests/bugs/886998/strict-readdir.t | 52 ------ tests/bugs/949327.t | 23 --- tests/bugs/access-control/bug-1051896.c | 97 ++++++++++ tests/bugs/access-control/bug-1051896.t | 34 ++++ .../bugs/access-control/bug-887098-gmount-crash.t | 42 +++++ tests/bugs/access-control/bug-958691.t | 49 +++++ tests/bugs/afr-quota-xattr-mdata-heal.t | 138 -------------- tests/bugs/brick-uid-reset-on-volume-restart.t | 54 ------ tests/bugs/bug-000000.t | 9 - tests/bugs/bug-1002207.t | 53 ------ tests/bugs/bug-1002556.t | 25 --- tests/bugs/bug-1004218.t | 26 --- tests/bugs/bug-1004744.t | 46 ----- tests/bugs/bug-1015990-rep.t | 80 -------- tests/bugs/bug-1015990.t | 95 ---------- tests/bugs/bug-1022055.t | 26 --- tests/bugs/bug-1022905.t | 39 ---- tests/bugs/bug-1023974.t | 35 ---- tests/bugs/bug-1027171.t | 53 ------ tests/bugs/bug-1030208.t | 35 ---- tests/bugs/bug-1030580.t | 48 ----- tests/bugs/bug-1032894.t | 33 ---- tests/bugs/bug-1032927.t | 32 ---- tests/bugs/bug-1034085.t | 31 ---- tests/bugs/bug-1034716.t | 60 ------ tests/bugs/bug-1035576.t | 52 ------ tests/bugs/bug-1037501.t | 104 ----------- tests/bugs/bug-1038598.t | 80 -------- tests/bugs/bug-1040408.t | 31 ---- tests/bugs/bug-1040423.t | 72 -------- tests/bugs/bug-1042725.t | 49 ----- tests/bugs/bug-1043886.t | 55 ------ tests/bugs/bug-1045333.t | 44 ----- tests/bugs/bug-1046308.t | 19 -- tests/bugs/bug-1046624.t | 46 ----- tests/bugs/bug-1047378.t | 12 -- tests/bugs/bug-1047416.t | 66 ------- tests/bugs/bug-1047955.t | 23 --- tests/bugs/bug-1049323.t | 64 ------- tests/bugs/bug-1049834.t | 44 ----- tests/bugs/bug-1051896.c | 94 ---------- tests/bugs/bug-1051896.t | 34 ---- tests/bugs/bug-1053579.t | 111 ------------ tests/bugs/bug-1058663.c | 119 ------------ tests/bugs/bug-1058663.t | 28 --- tests/bugs/bug-1058797.t | 45 ----- tests/bugs/bug-1063230.t | 29 --- tests/bugs/bug-1064768.t | 20 -- tests/bugs/bug-1066798.t | 86 --------- tests/bugs/bug-1070734.t | 74 -------- tests/bugs/bug-1075087.t | 33 ---- tests/bugs/bug-1077682.t | 34 ---- tests/bugs/bug-1085330.t | 80 -------- tests/bugs/bug-1086228.t | 34 ---- tests/bugs/bug-1087198.t | 77 -------- tests/bugs/bug-1087203.t | 103 ----------- tests/bugs/bug-1087487.t | 23 --- tests/bugs/bug-1088231.t | 161 ----------------- tests/bugs/bug-1089668.t | 27 --- tests/bugs/bug-1090042.t | 
30 --- ...091935-brick-order-check-from-cli-to-glusterd.t | 27 --- tests/bugs/bug-1092841.t | 24 --- tests/bugs/bug-1095097.t | 21 --- tests/bugs/bug-1099890.t | 125 ------------- tests/bugs/bug-1100050.t | 25 --- tests/bugs/bug-1101647.t | 29 --- tests/bugs/bug-1102656.t | 20 -- tests/bugs/bug-1104642.t | 47 ----- tests/bugs/bug-1104692.t | 32 ---- tests/bugs/bug-1109741-auth-mgmt-handshake.t | 50 ----- tests/bugs/bug-1109770.t | 65 ------- tests/bugs/bug-1109889.t | 74 -------- tests/bugs/bug-1110917.t | 39 ---- tests/bugs/bug-1111041.t | 36 ---- tests/bugs/bug-1111454.t | 18 -- tests/bugs/bug-1111490.t | 34 ---- tests/bugs/bug-1111557.t | 12 -- tests/bugs/bug-1112559.t | 61 ------- tests/bugs/bug-1112613.t | 49 ----- tests/bugs/bug-1113476.t | 44 ----- tests/bugs/bug-1113975.t | 38 ---- tests/bugs/bug-1116503.t | 45 ----- tests/bugs/bug-1117851.t | 95 ---------- tests/bugs/bug-1117951.t | 24 --- tests/bugs/bug-1119582.t | 24 --- tests/bugs/bug-1120647.t | 17 -- tests/bugs/bug-1122443.t | 59 ------ tests/bugs/bug-1125824.t | 100 ---------- tests/bugs/bug-1126048.c | 37 ---- tests/bugs/bug-1126048.t | 30 --- tests/bugs/bug-1130892.t | 60 ------ tests/bugs/bug-1132102.t | 28 --- tests/bugs/bug-1134691-afr-lookup-metadata-heal.t | 50 ----- .../bug-1135514-allow-setxattr-with-null-value.t | 18 -- tests/bugs/bug-1139230.t | 58 ------ ...hot-and-features-encryption-option-validation.t | 33 ---- ...ug-1155042-dont-display-deactivated-snapshots.t | 36 ---- tests/bugs/bug-1157223-symlink-mounting.t | 124 ------------- tests/bugs/bug-1157991.t | 30 --- tests/bugs/bug-1161092-nfs-acls.t | 36 ---- tests/bugs/bug-1161156.t | 54 ------ tests/bugs/bug-1162462.t | 38 ---- tests/bugs/bug-1162498.t | 56 ------ tests/bugs/bug-1164613.t | 34 ---- tests/bugs/bug-1166197.t | 48 ----- ...7580-set-proper-uid-and-gid-during-nfs-access.t | 201 --------------------- .../bugs/bug-1168803-snapd-option-validation-fix.t | 30 --- tests/bugs/bug-1168875.t | 96 ---------- .../bugs/bug-1173414-mgmt-v3-remote-lock-failure.t | 34 ---- tests/bugs/bug-1178079.t | 24 --- tests/bugs/bug-762989.t | 40 ---- tests/bugs/bug-764638.t | 13 -- tests/bugs/bug-765230.t | 60 ------ tests/bugs/bug-765380.t | 39 ---- tests/bugs/bug-765473.t | 35 ---- tests/bugs/bug-765564.t | 86 --------- tests/bugs/bug-767095.t | 51 ------ tests/bugs/bug-767585-gfid.t | 42 ----- tests/bugs/bug-770655.t | 168 ----------------- tests/bugs/bug-782095.t | 48 ----- tests/bugs/bug-797171.t | 41 ----- tests/bugs/bug-802417.t | 108 ----------- tests/bugs/bug-808400-dist.t | 32 ---- tests/bugs/bug-808400-fcntl.c | 117 ------------ tests/bugs/bug-808400-flock.c | 96 ---------- tests/bugs/bug-808400-repl.t | 31 ---- tests/bugs/bug-808400-stripe.t | 32 ---- tests/bugs/bug-808400.t | 35 ---- tests/bugs/bug-811493.t | 18 -- tests/bugs/bug-821056.t | 52 ------ tests/bugs/bug-822830.t | 64 ------- tests/bugs/bug-823081.t | 41 ----- tests/bugs/bug-824753-file-locker.c | 42 ----- tests/bugs/bug-824753.t | 45 ----- tests/bugs/bug-830665.t | 120 ------------ tests/bugs/bug-834465.c | 61 ------- tests/bugs/bug-834465.t | 48 ----- tests/bugs/bug-839595.t | 31 ---- tests/bugs/bug-844688.t | 34 ---- tests/bugs/bug-845213.t | 19 -- tests/bugs/bug-846240.t | 58 ------ tests/bugs/bug-847622.t | 36 ---- tests/bugs/bug-847624.t | 25 --- tests/bugs/bug-848251.t | 51 ------ tests/bugs/bug-852147.t | 85 --------- tests/bugs/bug-853258.t | 45 ----- tests/bugs/bug-853680.t | 53 ------ tests/bugs/bug-853690.t | 91 ---------- tests/bugs/bug-856455.t | 42 ----- 
tests/bugs/bug-857330/common.rc | 55 ------ tests/bugs/bug-857330/normal.t | 79 -------- tests/bugs/bug-857330/xml.t | 103 ----------- tests/bugs/bug-858215.t | 78 -------- tests/bugs/bug-858242.c | 81 --------- tests/bugs/bug-858242.t | 28 --- tests/bugs/bug-858488-min-free-disk.t | 108 ----------- tests/bugs/bug-859581.t | 53 ------ tests/bugs/bug-859927.t | 70 ------- tests/bugs/bug-860297.t | 13 -- tests/bugs/bug-860663.c | 42 ----- tests/bugs/bug-860663.t | 54 ------ tests/bugs/bug-861015-index.t | 36 ---- tests/bugs/bug-861015-log.t | 29 --- tests/bugs/bug-861542.t | 50 ----- tests/bugs/bug-862834.t | 46 ----- tests/bugs/bug-862967.t | 59 ------ tests/bugs/bug-864222.t | 27 --- tests/bugs/bug-865825.t | 82 --------- tests/bugs/bug-866459.t | 45 ----- tests/bugs/bug-867252.t | 41 ----- tests/bugs/bug-867253.t | 69 ------- tests/bugs/bug-869724.t | 37 ---- tests/bugs/bug-872923.t | 56 ------ tests/bugs/bug-873367.t | 45 ----- tests/bugs/bug-873549.t | 17 -- tests/bugs/bug-873962-spb.t | 39 ---- tests/bugs/bug-873962.t | 107 ----------- tests/bugs/bug-874498.t | 64 ------- tests/bugs/bug-877293.t | 41 ----- tests/bugs/bug-877885.t | 36 ---- tests/bugs/bug-877992.t | 61 ------- tests/bugs/bug-878004.t | 29 --- tests/bugs/bug-879490.t | 37 ---- tests/bugs/bug-879494.t | 37 ---- tests/bugs/bug-880898.t | 23 --- tests/bugs/bug-882278.t | 73 -------- tests/bugs/bug-884328.t | 12 -- tests/bugs/bug-884452.t | 47 ----- tests/bugs/bug-884455.t | 84 --------- tests/bugs/bug-884597.t | 173 ------------------ tests/bugs/bug-886998.t | 52 ------ tests/bugs/bug-887098-gmount-crash.t | 42 ----- tests/bugs/bug-887145.t | 88 --------- tests/bugs/bug-888174.t | 62 ------- tests/bugs/bug-888752.t | 24 --- tests/bugs/bug-889630.t | 56 ------ tests/bugs/bug-889996.t | 19 -- tests/bugs/bug-892730.t | 77 -------- tests/bugs/bug-893338.t | 34 ---- tests/bugs/bug-893378.t | 73 -------- tests/bugs/bug-895235.t | 23 --- tests/bugs/bug-896431.t | 124 ------------- tests/bugs/bug-902610.t | 65 ------- tests/bugs/bug-903336.t | 13 -- tests/bugs/bug-904065.t | 91 ---------- tests/bugs/bug-904300.t | 62 ------- tests/bugs/bug-905307.t | 36 ---- tests/bugs/bug-905864.c | 82 --------- tests/bugs/bug-905864.t | 32 ---- tests/bugs/bug-906646.t | 93 ---------- tests/bugs/bug-907072.t | 47 ----- tests/bugs/bug-908146.t | 39 ---- tests/bugs/bug-912297.t | 44 ----- tests/bugs/bug-912564.t | 92 ---------- tests/bugs/bug-913051.t | 67 ------- tests/bugs/bug-913487.t | 14 -- tests/bugs/bug-913544.t | 24 --- tests/bugs/bug-913555.t | 54 ------ tests/bugs/bug-915280.t | 51 ------ tests/bugs/bug-915554.t | 76 -------- tests/bugs/bug-916226.t | 26 --- tests/bugs/bug-916549.t | 19 -- tests/bugs/bug-918437-sh-mtime.t | 71 -------- tests/bugs/bug-921072.t | 124 ------------- tests/bugs/bug-921215.t | 13 -- tests/bugs/bug-921231.t | 31 ---- tests/bugs/bug-921408.t | 90 --------- tests/bugs/bug-924075.t | 23 --- tests/bugs/bug-924265.t | 35 ---- tests/bugs/bug-924726.t | 45 ----- tests/bugs/bug-927616.t | 62 ------- tests/bugs/bug-948686.t | 46 ----- tests/bugs/bug-948729/bug-948729-force.t | 103 ----------- tests/bugs/bug-948729/bug-948729-mode-script.t | 77 -------- tests/bugs/bug-948729/bug-948729.t | 80 -------- tests/bugs/bug-949242.t | 55 ------ tests/bugs/bug-949298.t | 12 -- tests/bugs/bug-949930.t | 27 --- tests/bugs/bug-954057.t | 44 ----- tests/bugs/bug-955588.t | 27 --- tests/bugs/bug-957877.t | 33 ---- tests/bugs/bug-958691.t | 49 ----- tests/bugs/bug-958790.t | 21 --- tests/bugs/bug-961307.t | 20 -- tests/bugs/bug-961615.t 
| 34 ---- tests/bugs/bug-961669.t | 48 ----- tests/bugs/bug-963541.t | 33 ---- tests/bugs/bug-963678.t | 57 ------ tests/bugs/bug-964059.t | 30 --- tests/bugs/bug-966018.t | 35 ---- tests/bugs/bug-969193.t | 13 -- tests/bugs/bug-970070.t | 13 -- tests/bugs/bug-973073.t | 48 ----- tests/bugs/bug-974007.t | 52 ------ tests/bugs/bug-974972.t | 37 ---- tests/bugs/bug-976800.t | 28 --- tests/bugs/bug-977246.t | 21 --- tests/bugs/bug-977797.t | 95 ---------- tests/bugs/bug-978794.t | 29 --- tests/bugs/bug-979365.t | 47 ----- tests/bugs/bug-982174.t | 36 ---- tests/bugs/bug-983317.t | 25 --- tests/bugs/bug-983477.t | 53 ------ tests/bugs/bug-985074.t | 55 ------ tests/bugs/bug-986429.t | 19 -- tests/bugs/bug-986905.t | 27 --- tests/bugs/bug-990028.t | 155 ---------------- tests/bugs/bug-991622.t | 35 ---- tests/bugs/cli/bug-1004218.t | 26 +++ tests/bugs/cli/bug-1022905.t | 39 ++++ tests/bugs/cli/bug-1030580.t | 48 +++++ tests/bugs/cli/bug-1047378.t | 12 ++ tests/bugs/cli/bug-1047416.t | 66 +++++++ tests/bugs/cli/bug-1077682.t | 34 ++++ tests/bugs/cli/bug-1087487.t | 23 +++ tests/bugs/cli/bug-1113476.t | 44 +++++ tests/bugs/cli/bug-764638.t | 13 ++ tests/bugs/cli/bug-770655.t | 168 +++++++++++++++++ tests/bugs/cli/bug-822830.t | 64 +++++++ tests/bugs/cli/bug-867252.t | 41 +++++ tests/bugs/cli/bug-921215.t | 13 ++ tests/bugs/cli/bug-949298.t | 12 ++ tests/bugs/cli/bug-961307.t | 20 ++ tests/bugs/cli/bug-969193.t | 13 ++ tests/bugs/cli/bug-977246.t | 21 +++ tests/bugs/cli/bug-982174.t | 36 ++++ tests/bugs/cli/bug-983317.t | 25 +++ tests/bugs/core/949327.t | 23 +++ tests/bugs/core/bug-1110917.t | 39 ++++ tests/bugs/core/bug-1111557.t | 12 ++ tests/bugs/core/bug-1117951.t | 24 +++ tests/bugs/core/bug-1119582.t | 24 +++ .../bug-1135514-allow-setxattr-with-null-value.t | 18 ++ .../core/bug-1168803-snapd-option-validation-fix.t | 30 +++ tests/bugs/core/bug-1168875.t | 96 ++++++++++ tests/bugs/core/bug-834465.c | 61 +++++++ tests/bugs/core/bug-834465.t | 48 +++++ tests/bugs/core/bug-845213.t | 19 ++ tests/bugs/core/bug-903336.t | 13 ++ tests/bugs/core/bug-908146.t | 39 ++++ tests/bugs/core/bug-913544.t | 24 +++ tests/bugs/core/bug-924075.t | 23 +++ tests/bugs/core/bug-927616.t | 62 +++++++ tests/bugs/core/bug-949242.t | 55 ++++++ tests/bugs/core/bug-986429.t | 19 ++ tests/bugs/distribute/bug-1042725.t | 49 +++++ tests/bugs/distribute/bug-1063230.t | 29 +++ tests/bugs/distribute/bug-1066798.t | 86 +++++++++ tests/bugs/distribute/bug-1086228.t | 34 ++++ tests/bugs/distribute/bug-1088231.t | 161 +++++++++++++++++ tests/bugs/distribute/bug-1099890.t | 125 +++++++++++++ tests/bugs/distribute/bug-1117851.t | 95 ++++++++++ tests/bugs/distribute/bug-1122443.t | 59 ++++++ tests/bugs/distribute/bug-1125824.t | 100 ++++++++++ tests/bugs/distribute/bug-1161156.t | 54 ++++++ tests/bugs/distribute/bug-853258.t | 45 +++++ tests/bugs/distribute/bug-860663.c | 43 +++++ tests/bugs/distribute/bug-860663.t | 54 ++++++ tests/bugs/distribute/bug-862967.t | 59 ++++++ tests/bugs/distribute/bug-882278.t | 73 ++++++++ tests/bugs/distribute/bug-884455.t | 84 +++++++++ tests/bugs/distribute/bug-884597.t | 173 ++++++++++++++++++ tests/bugs/distribute/bug-907072.t | 47 +++++ tests/bugs/distribute/bug-912564.t | 92 ++++++++++ tests/bugs/distribute/bug-915554.t | 76 ++++++++ tests/bugs/distribute/bug-921408.t | 90 +++++++++ tests/bugs/distribute/bug-924265.t | 35 ++++ tests/bugs/distribute/bug-961615.t | 34 ++++ tests/bugs/distribute/bug-973073.t | 48 +++++ tests/bugs/distribute/overlap.py | 59 ++++++ tests/bugs/error-gen/bug-767095.t | 
51 ++++++ tests/bugs/fuse/bug-1030208.t | 35 ++++ tests/bugs/fuse/bug-1126048.c | 37 ++++ tests/bugs/fuse/bug-1126048.t | 30 +++ tests/bugs/fuse/bug-858215.t | 78 ++++++++ tests/bugs/fuse/bug-858488-min-free-disk.t | 108 +++++++++++ tests/bugs/fuse/bug-924726.t | 45 +++++ tests/bugs/fuse/bug-963678.t | 57 ++++++ tests/bugs/fuse/bug-983477.t | 53 ++++++ tests/bugs/fuse/bug-985074.t | 55 ++++++ tests/bugs/geo-replication/bug-1111490.t | 34 ++++ tests/bugs/geo-replication/bug-877293.t | 41 +++++ tests/bugs/getlk_owner.c | 120 ------------ tests/bugs/glusterd/859927/repl.t | 69 +++++++ tests/bugs/glusterd/bug-000000.t | 9 + tests/bugs/glusterd/bug-1002556.t | 25 +++ tests/bugs/glusterd/bug-1004744.t | 46 +++++ tests/bugs/glusterd/bug-1022055.t | 26 +++ tests/bugs/glusterd/bug-1027171.t | 53 ++++++ tests/bugs/glusterd/bug-1040408.t | 31 ++++ tests/bugs/glusterd/bug-1046308.t | 19 ++ tests/bugs/glusterd/bug-1047955.t | 23 +++ tests/bugs/glusterd/bug-1070734.t | 74 ++++++++ tests/bugs/glusterd/bug-1075087.t | 33 ++++ tests/bugs/glusterd/bug-1085330.t | 80 ++++++++ tests/bugs/glusterd/bug-1087203.t | 103 +++++++++++ tests/bugs/glusterd/bug-1089668.t | 27 +++ tests/bugs/glusterd/bug-1090042.t | 30 +++ ...091935-brick-order-check-from-cli-to-glusterd.t | 27 +++ tests/bugs/glusterd/bug-1092841.t | 24 +++ tests/bugs/glusterd/bug-1095097.t | 21 +++ tests/bugs/glusterd/bug-1102656.t | 20 ++ tests/bugs/glusterd/bug-1104642.t | 47 +++++ .../glusterd/bug-1109741-auth-mgmt-handshake.t | 50 +++++ tests/bugs/glusterd/bug-1109770.t | 65 +++++++ tests/bugs/glusterd/bug-1109889.t | 74 ++++++++ tests/bugs/glusterd/bug-1111041.t | 36 ++++ tests/bugs/glusterd/bug-1112559.t | 61 +++++++ tests/bugs/glusterd/bug-1112613.t | 49 +++++ tests/bugs/glusterd/bug-1113975.t | 38 ++++ tests/bugs/glusterd/bug-1120647.t | 17 ++ ...hot-and-features-encryption-option-validation.t | 33 ++++ .../bug-1173414-mgmt-v3-remote-lock-failure.t | 34 ++++ tests/bugs/glusterd/bug-765230.t | 60 ++++++ tests/bugs/glusterd/bug-782095.t | 48 +++++ tests/bugs/glusterd/bug-824753-file-locker.c | 42 +++++ tests/bugs/glusterd/bug-824753.t | 45 +++++ tests/bugs/glusterd/bug-839595.t | 31 ++++ tests/bugs/glusterd/bug-857330/common.rc | 55 ++++++ tests/bugs/glusterd/bug-857330/normal.t | 79 ++++++++ tests/bugs/glusterd/bug-857330/xml.t | 103 +++++++++++ tests/bugs/glusterd/bug-859927.t | 70 +++++++ tests/bugs/glusterd/bug-862834.t | 46 +++++ tests/bugs/glusterd/bug-878004.t | 29 +++ tests/bugs/glusterd/bug-888752.t | 24 +++ tests/bugs/glusterd/bug-889630.t | 56 ++++++ tests/bugs/glusterd/bug-905307.t | 36 ++++ tests/bugs/glusterd/bug-913487.t | 14 ++ tests/bugs/glusterd/bug-913555.t | 54 ++++++ tests/bugs/glusterd/bug-916549.t | 19 ++ tests/bugs/glusterd/bug-948686.t | 46 +++++ tests/bugs/glusterd/bug-948729/bug-948729-force.t | 103 +++++++++++ .../glusterd/bug-948729/bug-948729-mode-script.t | 77 ++++++++ tests/bugs/glusterd/bug-948729/bug-948729.t | 80 ++++++++ tests/bugs/glusterd/bug-949930.t | 27 +++ tests/bugs/glusterd/bug-955588.t | 27 +++ tests/bugs/glusterd/bug-958790.t | 21 +++ tests/bugs/glusterd/bug-961669.t | 48 +++++ tests/bugs/glusterd/bug-963541.t | 33 ++++ tests/bugs/glusterd/bug-964059.t | 30 +++ tests/bugs/glusterd/bug-974007.t | 52 ++++++ tests/bugs/glusterfs-server/bug-852147.t | 85 +++++++++ tests/bugs/glusterfs-server/bug-861542.t | 50 +++++ tests/bugs/glusterfs-server/bug-864222.t | 27 +++ tests/bugs/glusterfs-server/bug-873549.t | 17 ++ tests/bugs/glusterfs-server/bug-877992.t | 61 +++++++ tests/bugs/glusterfs-server/bug-887145.t 
| 88 +++++++++ tests/bugs/glusterfs-server/bug-889996.t | 19 ++ tests/bugs/glusterfs-server/bug-904300.t | 62 +++++++ tests/bugs/glusterfs-server/bug-905864.c | 82 +++++++++ tests/bugs/glusterfs-server/bug-905864.t | 32 ++++ tests/bugs/glusterfs-server/bug-912297.t | 44 +++++ tests/bugs/glusterfs/bug-811493.t | 18 ++ tests/bugs/glusterfs/bug-844688.t | 34 ++++ tests/bugs/glusterfs/bug-848251.t | 51 ++++++ tests/bugs/glusterfs/bug-853690.t | 91 ++++++++++ tests/bugs/glusterfs/bug-856455.t | 42 +++++ tests/bugs/glusterfs/bug-860297.t | 13 ++ tests/bugs/glusterfs/bug-861015-index.t | 36 ++++ tests/bugs/glusterfs/bug-861015-log.t | 29 +++ tests/bugs/glusterfs/bug-866459.t | 45 +++++ tests/bugs/glusterfs/bug-867253.t | 69 +++++++ tests/bugs/glusterfs/bug-869724.t | 37 ++++ tests/bugs/glusterfs/bug-872923.t | 56 ++++++ tests/bugs/glusterfs/bug-873962-spb.t | 39 ++++ tests/bugs/glusterfs/bug-873962.t | 107 +++++++++++ tests/bugs/glusterfs/bug-879490.t | 37 ++++ tests/bugs/glusterfs/bug-879494.t | 37 ++++ tests/bugs/glusterfs/bug-892730.t | 77 ++++++++ tests/bugs/glusterfs/bug-893338.t | 34 ++++ tests/bugs/glusterfs/bug-893378.t | 73 ++++++++ tests/bugs/glusterfs/bug-895235.t | 23 +++ tests/bugs/glusterfs/bug-896431.t | 124 +++++++++++++ tests/bugs/glusterfs/bug-902610.t | 65 +++++++ tests/bugs/glusterfs/bug-906646.t | 93 ++++++++++ tests/bugs/glusterfs/getlk_owner.c | 120 ++++++++++++ tests/bugs/io-cache/bug-858242.c | 81 +++++++++ tests/bugs/io-cache/bug-858242.t | 28 +++ tests/bugs/libgfapi/bug-1032894.t | 33 ++++ tests/bugs/logging/bug-823081.t | 41 +++++ tests/bugs/nfs/bug-1053579.t | 111 ++++++++++++ tests/bugs/nfs/bug-1116503.t | 45 +++++ tests/bugs/nfs/bug-1157223-symlink-mounting.t | 124 +++++++++++++ tests/bugs/nfs/bug-1161092-nfs-acls.t | 36 ++++ tests/bugs/nfs/bug-847622.t | 36 ++++ tests/bugs/nfs/bug-877885.t | 36 ++++ tests/bugs/nfs/bug-904065.t | 91 ++++++++++ tests/bugs/nfs/bug-915280.t | 51 ++++++ tests/bugs/nfs/bug-970070.t | 13 ++ tests/bugs/nfs/bug-974972.t | 37 ++++ tests/bugs/overlap.py | 59 ------ tests/bugs/posix/bug-1034716.t | 60 ++++++ ...bug-1040275-brick-uid-reset-on-volume-restart.t | 54 ++++++ tests/bugs/posix/bug-765380.t | 39 ++++ tests/bugs/posix/bug-990028.t | 155 ++++++++++++++++ tests/bugs/protocol/bug-762989.t | 40 ++++ tests/bugs/protocol/bug-808400-dist.t | 32 ++++ tests/bugs/protocol/bug-808400-fcntl.c | 117 ++++++++++++ tests/bugs/protocol/bug-808400-flock.c | 96 ++++++++++ tests/bugs/protocol/bug-808400-repl.t | 31 ++++ tests/bugs/protocol/bug-808400-stripe.t | 32 ++++ tests/bugs/protocol/bug-808400.t | 35 ++++ tests/bugs/quick-read/bug-846240.t | 58 ++++++ tests/bugs/quota/afr-quota-xattr-mdata-heal.t | 138 ++++++++++++++ tests/bugs/quota/bug-1023974.t | 35 ++++ tests/bugs/quota/bug-1035576.t | 52 ++++++ tests/bugs/quota/bug-1038598.t | 80 ++++++++ tests/bugs/quota/bug-1040423.t | 72 ++++++++ tests/bugs/quota/bug-1049323.t | 64 +++++++ tests/bugs/quota/bug-1087198.t | 77 ++++++++ tests/bugs/quota/bug-1100050.t | 25 +++ tests/bugs/quota/bug-1104692.t | 32 ++++ tests/bugs/rdma/bug-765473.t | 35 ++++ tests/bugs/replicate/886998/strict-readdir.t | 52 ++++++ tests/bugs/replicate/bug-1015990-rep.t | 80 ++++++++ tests/bugs/replicate/bug-1015990.t | 95 ++++++++++ tests/bugs/replicate/bug-1032927.t | 32 ++++ tests/bugs/replicate/bug-1037501.t | 104 +++++++++++ tests/bugs/replicate/bug-1046624.t | 46 +++++ tests/bugs/replicate/bug-1058797.t | 45 +++++ tests/bugs/replicate/bug-1101647.t | 29 +++ tests/bugs/replicate/bug-1130892.t | 60 ++++++ 
tests/bugs/replicate/bug-1132102.t | 28 +++ .../bug-1134691-afr-lookup-metadata-heal.t | 50 +++++ tests/bugs/replicate/bug-1139230.t | 58 ++++++ tests/bugs/replicate/bug-765564.t | 86 +++++++++ tests/bugs/replicate/bug-767585-gfid.t | 42 +++++ tests/bugs/replicate/bug-802417.t | 108 +++++++++++ tests/bugs/replicate/bug-821056.t | 52 ++++++ tests/bugs/replicate/bug-830665.t | 120 ++++++++++++ tests/bugs/replicate/bug-853680.t | 53 ++++++ tests/bugs/replicate/bug-859581.t | 53 ++++++ tests/bugs/replicate/bug-865825.t | 82 +++++++++ tests/bugs/replicate/bug-880898.t | 23 +++ tests/bugs/replicate/bug-884328.t | 12 ++ tests/bugs/replicate/bug-886998.t | 52 ++++++ tests/bugs/replicate/bug-888174.t | 62 +++++++ tests/bugs/replicate/bug-913051.t | 67 +++++++ tests/bugs/replicate/bug-916226.t | 26 +++ tests/bugs/replicate/bug-918437-sh-mtime.t | 71 ++++++++ tests/bugs/replicate/bug-921231.t | 31 ++++ tests/bugs/replicate/bug-957877.t | 33 ++++ tests/bugs/replicate/bug-966018.t | 35 ++++ tests/bugs/replicate/bug-976800.t | 28 +++ tests/bugs/replicate/bug-977797.t | 95 ++++++++++ tests/bugs/replicate/bug-978794.t | 29 +++ tests/bugs/replicate/bug-979365.t | 47 +++++ tests/bugs/replicate/bug-986905.t | 27 +++ tests/bugs/rpc/bug-1043886.t | 55 ++++++ tests/bugs/rpc/bug-847624.t | 25 +++ tests/bugs/rpc/bug-884452.t | 47 +++++ tests/bugs/rpc/bug-921072.t | 124 +++++++++++++ tests/bugs/rpc/bug-954057.t | 44 +++++ tests/bugs/snapshot/bug-1045333.t | 44 +++++ tests/bugs/snapshot/bug-1049834.t | 44 +++++ tests/bugs/snapshot/bug-1064768.t | 20 ++ ...ug-1155042-dont-display-deactivated-snapshots.t | 36 ++++ tests/bugs/snapshot/bug-1157991.t | 30 +++ tests/bugs/snapshot/bug-1162462.t | 38 ++++ tests/bugs/snapshot/bug-1162498.t | 56 ++++++ tests/bugs/snapshot/bug-1164613.t | 34 ++++ tests/bugs/snapshot/bug-1166197.t | 48 +++++ ...7580-set-proper-uid-and-gid-during-nfs-access.t | 201 +++++++++++++++++++++ tests/bugs/snapshot/bug-1178079.t | 24 +++ tests/bugs/stripe/bug-1002207.t | 53 ++++++ tests/bugs/stripe/bug-1111454.t | 18 ++ tests/bugs/trace/bug-797171.t | 41 +++++ tests/bugs/transport/bug-873367.t | 45 +++++ tests/bugs/unclassified/bug-1034085.t | 31 ++++ tests/bugs/unclassified/bug-874498.t | 64 +++++++ tests/bugs/unclassified/bug-991622.t | 35 ++++ tests/bugs/write-behind/bug-1058663.c | 119 ++++++++++++ tests/bugs/write-behind/bug-1058663.t | 28 +++ 534 files changed, 13852 insertions(+), 13848 deletions(-) delete mode 100755 tests/bugs/859927/repl.t delete mode 100644 tests/bugs/886998/strict-readdir.t delete mode 100644 tests/bugs/949327.t create mode 100644 tests/bugs/access-control/bug-1051896.c create mode 100644 tests/bugs/access-control/bug-1051896.t create mode 100644 tests/bugs/access-control/bug-887098-gmount-crash.t create mode 100644 tests/bugs/access-control/bug-958691.t delete mode 100644 tests/bugs/afr-quota-xattr-mdata-heal.t delete mode 100755 tests/bugs/brick-uid-reset-on-volume-restart.t delete mode 100755 tests/bugs/bug-000000.t delete mode 100644 tests/bugs/bug-1002207.t delete mode 100755 tests/bugs/bug-1002556.t delete mode 100644 tests/bugs/bug-1004218.t delete mode 100644 tests/bugs/bug-1004744.t delete mode 100755 tests/bugs/bug-1015990-rep.t delete mode 100755 tests/bugs/bug-1015990.t delete mode 100755 tests/bugs/bug-1022055.t delete mode 100644 tests/bugs/bug-1022905.t delete mode 100644 tests/bugs/bug-1023974.t delete mode 100644 tests/bugs/bug-1027171.t delete mode 100644 tests/bugs/bug-1030208.t delete mode 100644 tests/bugs/bug-1030580.t delete mode 100644 
tests/bugs/bug-1032894.t delete mode 100644 tests/bugs/bug-1032927.t delete mode 100644 tests/bugs/bug-1034085.t delete mode 100644 tests/bugs/bug-1034716.t delete mode 100644 tests/bugs/bug-1035576.t delete mode 100755 tests/bugs/bug-1037501.t delete mode 100644 tests/bugs/bug-1038598.t delete mode 100644 tests/bugs/bug-1040408.t delete mode 100755 tests/bugs/bug-1040423.t delete mode 100644 tests/bugs/bug-1042725.t delete mode 100755 tests/bugs/bug-1043886.t delete mode 100755 tests/bugs/bug-1045333.t delete mode 100644 tests/bugs/bug-1046308.t delete mode 100755 tests/bugs/bug-1046624.t delete mode 100644 tests/bugs/bug-1047378.t delete mode 100644 tests/bugs/bug-1047416.t delete mode 100644 tests/bugs/bug-1047955.t delete mode 100755 tests/bugs/bug-1049323.t delete mode 100755 tests/bugs/bug-1049834.t delete mode 100644 tests/bugs/bug-1051896.c delete mode 100644 tests/bugs/bug-1051896.t delete mode 100755 tests/bugs/bug-1053579.t delete mode 100644 tests/bugs/bug-1058663.c delete mode 100644 tests/bugs/bug-1058663.t delete mode 100644 tests/bugs/bug-1058797.t delete mode 100755 tests/bugs/bug-1063230.t delete mode 100644 tests/bugs/bug-1064768.t delete mode 100755 tests/bugs/bug-1066798.t delete mode 100755 tests/bugs/bug-1070734.t delete mode 100644 tests/bugs/bug-1075087.t delete mode 100644 tests/bugs/bug-1077682.t delete mode 100755 tests/bugs/bug-1085330.t delete mode 100755 tests/bugs/bug-1086228.t delete mode 100644 tests/bugs/bug-1087198.t delete mode 100644 tests/bugs/bug-1087203.t delete mode 100755 tests/bugs/bug-1087487.t delete mode 100755 tests/bugs/bug-1088231.t delete mode 100755 tests/bugs/bug-1089668.t delete mode 100755 tests/bugs/bug-1090042.t delete mode 100755 tests/bugs/bug-1091935-brick-order-check-from-cli-to-glusterd.t delete mode 100644 tests/bugs/bug-1092841.t delete mode 100755 tests/bugs/bug-1095097.t delete mode 100644 tests/bugs/bug-1099890.t delete mode 100644 tests/bugs/bug-1100050.t delete mode 100644 tests/bugs/bug-1101647.t delete mode 100644 tests/bugs/bug-1102656.t delete mode 100644 tests/bugs/bug-1104642.t delete mode 100755 tests/bugs/bug-1104692.t delete mode 100644 tests/bugs/bug-1109741-auth-mgmt-handshake.t delete mode 100644 tests/bugs/bug-1109770.t delete mode 100644 tests/bugs/bug-1109889.t delete mode 100644 tests/bugs/bug-1110917.t delete mode 100644 tests/bugs/bug-1111041.t delete mode 100644 tests/bugs/bug-1111454.t delete mode 100644 tests/bugs/bug-1111490.t delete mode 100644 tests/bugs/bug-1111557.t delete mode 100755 tests/bugs/bug-1112559.t delete mode 100644 tests/bugs/bug-1112613.t delete mode 100644 tests/bugs/bug-1113476.t delete mode 100644 tests/bugs/bug-1113975.t delete mode 100644 tests/bugs/bug-1116503.t delete mode 100755 tests/bugs/bug-1117851.t delete mode 100644 tests/bugs/bug-1117951.t delete mode 100644 tests/bugs/bug-1119582.t delete mode 100644 tests/bugs/bug-1120647.t delete mode 100644 tests/bugs/bug-1122443.t delete mode 100755 tests/bugs/bug-1125824.t delete mode 100644 tests/bugs/bug-1126048.c delete mode 100755 tests/bugs/bug-1126048.t delete mode 100644 tests/bugs/bug-1130892.t delete mode 100644 tests/bugs/bug-1132102.t delete mode 100644 tests/bugs/bug-1134691-afr-lookup-metadata-heal.t delete mode 100644 tests/bugs/bug-1135514-allow-setxattr-with-null-value.t delete mode 100644 tests/bugs/bug-1139230.t delete mode 100644 tests/bugs/bug-1140162-file-snapshot-and-features-encryption-option-validation.t delete mode 100644 tests/bugs/bug-1155042-dont-display-deactivated-snapshots.t delete mode 100644 
tests/bugs/bug-1157223-symlink-mounting.t delete mode 100755 tests/bugs/bug-1157991.t delete mode 100644 tests/bugs/bug-1161092-nfs-acls.t delete mode 100755 tests/bugs/bug-1161156.t delete mode 100755 tests/bugs/bug-1162462.t delete mode 100644 tests/bugs/bug-1162498.t delete mode 100644 tests/bugs/bug-1164613.t delete mode 100755 tests/bugs/bug-1166197.t delete mode 100644 tests/bugs/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t delete mode 100755 tests/bugs/bug-1168803-snapd-option-validation-fix.t delete mode 100644 tests/bugs/bug-1168875.t delete mode 100755 tests/bugs/bug-1173414-mgmt-v3-remote-lock-failure.t delete mode 100644 tests/bugs/bug-1178079.t delete mode 100755 tests/bugs/bug-762989.t delete mode 100644 tests/bugs/bug-764638.t delete mode 100755 tests/bugs/bug-765230.t delete mode 100644 tests/bugs/bug-765380.t delete mode 100755 tests/bugs/bug-765473.t delete mode 100644 tests/bugs/bug-765564.t delete mode 100755 tests/bugs/bug-767095.t delete mode 100755 tests/bugs/bug-767585-gfid.t delete mode 100755 tests/bugs/bug-770655.t delete mode 100755 tests/bugs/bug-782095.t delete mode 100755 tests/bugs/bug-797171.t delete mode 100755 tests/bugs/bug-802417.t delete mode 100755 tests/bugs/bug-808400-dist.t delete mode 100644 tests/bugs/bug-808400-fcntl.c delete mode 100644 tests/bugs/bug-808400-flock.c delete mode 100755 tests/bugs/bug-808400-repl.t delete mode 100755 tests/bugs/bug-808400-stripe.t delete mode 100755 tests/bugs/bug-808400.t delete mode 100755 tests/bugs/bug-811493.t delete mode 100644 tests/bugs/bug-821056.t delete mode 100755 tests/bugs/bug-822830.t delete mode 100755 tests/bugs/bug-823081.t delete mode 100644 tests/bugs/bug-824753-file-locker.c delete mode 100755 tests/bugs/bug-824753.t delete mode 100755 tests/bugs/bug-830665.t delete mode 100644 tests/bugs/bug-834465.c delete mode 100755 tests/bugs/bug-834465.t delete mode 100644 tests/bugs/bug-839595.t delete mode 100755 tests/bugs/bug-844688.t delete mode 100644 tests/bugs/bug-845213.t delete mode 100644 tests/bugs/bug-846240.t delete mode 100755 tests/bugs/bug-847622.t delete mode 100755 tests/bugs/bug-847624.t delete mode 100644 tests/bugs/bug-848251.t delete mode 100755 tests/bugs/bug-852147.t delete mode 100755 tests/bugs/bug-853258.t delete mode 100755 tests/bugs/bug-853680.t delete mode 100755 tests/bugs/bug-853690.t delete mode 100644 tests/bugs/bug-856455.t delete mode 100644 tests/bugs/bug-857330/common.rc delete mode 100755 tests/bugs/bug-857330/normal.t delete mode 100755 tests/bugs/bug-857330/xml.t delete mode 100755 tests/bugs/bug-858215.t delete mode 100644 tests/bugs/bug-858242.c delete mode 100755 tests/bugs/bug-858242.t delete mode 100644 tests/bugs/bug-858488-min-free-disk.t delete mode 100755 tests/bugs/bug-859581.t delete mode 100755 tests/bugs/bug-859927.t delete mode 100644 tests/bugs/bug-860297.t delete mode 100644 tests/bugs/bug-860663.c delete mode 100644 tests/bugs/bug-860663.t delete mode 100644 tests/bugs/bug-861015-index.t delete mode 100644 tests/bugs/bug-861015-log.t delete mode 100755 tests/bugs/bug-861542.t delete mode 100755 tests/bugs/bug-862834.t delete mode 100644 tests/bugs/bug-862967.t delete mode 100755 tests/bugs/bug-864222.t delete mode 100755 tests/bugs/bug-865825.t delete mode 100644 tests/bugs/bug-866459.t delete mode 100644 tests/bugs/bug-867252.t delete mode 100644 tests/bugs/bug-867253.t delete mode 100644 tests/bugs/bug-869724.t delete mode 100755 tests/bugs/bug-872923.t delete mode 100755 tests/bugs/bug-873367.t delete mode 100644 
tests/bugs/bug-873549.t delete mode 100644 tests/bugs/bug-873962-spb.t delete mode 100755 tests/bugs/bug-873962.t delete mode 100644 tests/bugs/bug-874498.t delete mode 100755 tests/bugs/bug-877293.t delete mode 100755 tests/bugs/bug-877885.t delete mode 100755 tests/bugs/bug-877992.t delete mode 100644 tests/bugs/bug-878004.t delete mode 100755 tests/bugs/bug-879490.t delete mode 100755 tests/bugs/bug-879494.t delete mode 100644 tests/bugs/bug-880898.t delete mode 100755 tests/bugs/bug-882278.t delete mode 100644 tests/bugs/bug-884328.t delete mode 100644 tests/bugs/bug-884452.t delete mode 100755 tests/bugs/bug-884455.t delete mode 100755 tests/bugs/bug-884597.t delete mode 100644 tests/bugs/bug-886998.t delete mode 100644 tests/bugs/bug-887098-gmount-crash.t delete mode 100755 tests/bugs/bug-887145.t delete mode 100644 tests/bugs/bug-888174.t delete mode 100644 tests/bugs/bug-888752.t delete mode 100755 tests/bugs/bug-889630.t delete mode 100644 tests/bugs/bug-889996.t delete mode 100755 tests/bugs/bug-892730.t delete mode 100644 tests/bugs/bug-893338.t delete mode 100755 tests/bugs/bug-893378.t delete mode 100644 tests/bugs/bug-895235.t delete mode 100755 tests/bugs/bug-896431.t delete mode 100755 tests/bugs/bug-902610.t delete mode 100644 tests/bugs/bug-903336.t delete mode 100755 tests/bugs/bug-904065.t delete mode 100755 tests/bugs/bug-904300.t delete mode 100644 tests/bugs/bug-905307.t delete mode 100644 tests/bugs/bug-905864.c delete mode 100644 tests/bugs/bug-905864.t delete mode 100644 tests/bugs/bug-906646.t delete mode 100755 tests/bugs/bug-907072.t delete mode 100755 tests/bugs/bug-908146.t delete mode 100755 tests/bugs/bug-912297.t delete mode 100755 tests/bugs/bug-912564.t delete mode 100644 tests/bugs/bug-913051.t delete mode 100644 tests/bugs/bug-913487.t delete mode 100644 tests/bugs/bug-913544.t delete mode 100755 tests/bugs/bug-913555.t delete mode 100755 tests/bugs/bug-915280.t delete mode 100755 tests/bugs/bug-915554.t delete mode 100644 tests/bugs/bug-916226.t delete mode 100755 tests/bugs/bug-916549.t delete mode 100644 tests/bugs/bug-918437-sh-mtime.t delete mode 100755 tests/bugs/bug-921072.t delete mode 100755 tests/bugs/bug-921215.t delete mode 100644 tests/bugs/bug-921231.t delete mode 100755 tests/bugs/bug-921408.t delete mode 100755 tests/bugs/bug-924075.t delete mode 100755 tests/bugs/bug-924265.t delete mode 100755 tests/bugs/bug-924726.t delete mode 100755 tests/bugs/bug-927616.t delete mode 100755 tests/bugs/bug-948686.t delete mode 100644 tests/bugs/bug-948729/bug-948729-force.t delete mode 100644 tests/bugs/bug-948729/bug-948729-mode-script.t delete mode 100644 tests/bugs/bug-948729/bug-948729.t delete mode 100644 tests/bugs/bug-949242.t delete mode 100644 tests/bugs/bug-949298.t delete mode 100644 tests/bugs/bug-949930.t delete mode 100755 tests/bugs/bug-954057.t delete mode 100755 tests/bugs/bug-955588.t delete mode 100644 tests/bugs/bug-957877.t delete mode 100644 tests/bugs/bug-958691.t delete mode 100644 tests/bugs/bug-958790.t delete mode 100644 tests/bugs/bug-961307.t delete mode 100644 tests/bugs/bug-961615.t delete mode 100644 tests/bugs/bug-961669.t delete mode 100755 tests/bugs/bug-963541.t delete mode 100644 tests/bugs/bug-963678.t delete mode 100755 tests/bugs/bug-964059.t delete mode 100644 tests/bugs/bug-966018.t delete mode 100755 tests/bugs/bug-969193.t delete mode 100755 tests/bugs/bug-970070.t delete mode 100755 tests/bugs/bug-973073.t delete mode 100644 tests/bugs/bug-974007.t delete mode 100755 tests/bugs/bug-974972.t delete mode 
100644 tests/bugs/bug-976800.t delete mode 100644 tests/bugs/bug-977246.t delete mode 100755 tests/bugs/bug-977797.t delete mode 100644 tests/bugs/bug-978794.t delete mode 100755 tests/bugs/bug-979365.t delete mode 100644 tests/bugs/bug-982174.t delete mode 100644 tests/bugs/bug-983317.t delete mode 100755 tests/bugs/bug-983477.t delete mode 100644 tests/bugs/bug-985074.t delete mode 100644 tests/bugs/bug-986429.t delete mode 100755 tests/bugs/bug-986905.t delete mode 100755 tests/bugs/bug-990028.t delete mode 100644 tests/bugs/bug-991622.t create mode 100644 tests/bugs/cli/bug-1004218.t create mode 100644 tests/bugs/cli/bug-1022905.t create mode 100644 tests/bugs/cli/bug-1030580.t create mode 100644 tests/bugs/cli/bug-1047378.t create mode 100644 tests/bugs/cli/bug-1047416.t create mode 100644 tests/bugs/cli/bug-1077682.t create mode 100755 tests/bugs/cli/bug-1087487.t create mode 100644 tests/bugs/cli/bug-1113476.t create mode 100644 tests/bugs/cli/bug-764638.t create mode 100755 tests/bugs/cli/bug-770655.t create mode 100755 tests/bugs/cli/bug-822830.t create mode 100644 tests/bugs/cli/bug-867252.t create mode 100755 tests/bugs/cli/bug-921215.t create mode 100644 tests/bugs/cli/bug-949298.t create mode 100644 tests/bugs/cli/bug-961307.t create mode 100755 tests/bugs/cli/bug-969193.t create mode 100644 tests/bugs/cli/bug-977246.t create mode 100644 tests/bugs/cli/bug-982174.t create mode 100644 tests/bugs/cli/bug-983317.t create mode 100644 tests/bugs/core/949327.t create mode 100644 tests/bugs/core/bug-1110917.t create mode 100644 tests/bugs/core/bug-1111557.t create mode 100644 tests/bugs/core/bug-1117951.t create mode 100644 tests/bugs/core/bug-1119582.t create mode 100644 tests/bugs/core/bug-1135514-allow-setxattr-with-null-value.t create mode 100755 tests/bugs/core/bug-1168803-snapd-option-validation-fix.t create mode 100644 tests/bugs/core/bug-1168875.t create mode 100644 tests/bugs/core/bug-834465.c create mode 100755 tests/bugs/core/bug-834465.t create mode 100644 tests/bugs/core/bug-845213.t create mode 100644 tests/bugs/core/bug-903336.t create mode 100755 tests/bugs/core/bug-908146.t create mode 100644 tests/bugs/core/bug-913544.t create mode 100755 tests/bugs/core/bug-924075.t create mode 100755 tests/bugs/core/bug-927616.t create mode 100644 tests/bugs/core/bug-949242.t create mode 100644 tests/bugs/core/bug-986429.t create mode 100644 tests/bugs/distribute/bug-1042725.t create mode 100755 tests/bugs/distribute/bug-1063230.t create mode 100755 tests/bugs/distribute/bug-1066798.t create mode 100755 tests/bugs/distribute/bug-1086228.t create mode 100755 tests/bugs/distribute/bug-1088231.t create mode 100644 tests/bugs/distribute/bug-1099890.t create mode 100755 tests/bugs/distribute/bug-1117851.t create mode 100644 tests/bugs/distribute/bug-1122443.t create mode 100755 tests/bugs/distribute/bug-1125824.t create mode 100755 tests/bugs/distribute/bug-1161156.t create mode 100755 tests/bugs/distribute/bug-853258.t create mode 100644 tests/bugs/distribute/bug-860663.c create mode 100644 tests/bugs/distribute/bug-860663.t create mode 100644 tests/bugs/distribute/bug-862967.t create mode 100755 tests/bugs/distribute/bug-882278.t create mode 100755 tests/bugs/distribute/bug-884455.t create mode 100755 tests/bugs/distribute/bug-884597.t create mode 100755 tests/bugs/distribute/bug-907072.t create mode 100755 tests/bugs/distribute/bug-912564.t create mode 100755 tests/bugs/distribute/bug-915554.t create mode 100755 tests/bugs/distribute/bug-921408.t create mode 100755 
tests/bugs/distribute/bug-924265.t create mode 100644 tests/bugs/distribute/bug-961615.t create mode 100755 tests/bugs/distribute/bug-973073.t create mode 100755 tests/bugs/distribute/overlap.py create mode 100755 tests/bugs/error-gen/bug-767095.t create mode 100644 tests/bugs/fuse/bug-1030208.t create mode 100644 tests/bugs/fuse/bug-1126048.c create mode 100755 tests/bugs/fuse/bug-1126048.t create mode 100755 tests/bugs/fuse/bug-858215.t create mode 100644 tests/bugs/fuse/bug-858488-min-free-disk.t create mode 100755 tests/bugs/fuse/bug-924726.t create mode 100644 tests/bugs/fuse/bug-963678.t create mode 100755 tests/bugs/fuse/bug-983477.t create mode 100644 tests/bugs/fuse/bug-985074.t create mode 100644 tests/bugs/geo-replication/bug-1111490.t create mode 100755 tests/bugs/geo-replication/bug-877293.t delete mode 100644 tests/bugs/getlk_owner.c create mode 100755 tests/bugs/glusterd/859927/repl.t create mode 100755 tests/bugs/glusterd/bug-000000.t create mode 100755 tests/bugs/glusterd/bug-1002556.t create mode 100644 tests/bugs/glusterd/bug-1004744.t create mode 100755 tests/bugs/glusterd/bug-1022055.t create mode 100644 tests/bugs/glusterd/bug-1027171.t create mode 100644 tests/bugs/glusterd/bug-1040408.t create mode 100644 tests/bugs/glusterd/bug-1046308.t create mode 100644 tests/bugs/glusterd/bug-1047955.t create mode 100755 tests/bugs/glusterd/bug-1070734.t create mode 100644 tests/bugs/glusterd/bug-1075087.t create mode 100755 tests/bugs/glusterd/bug-1085330.t create mode 100644 tests/bugs/glusterd/bug-1087203.t create mode 100755 tests/bugs/glusterd/bug-1089668.t create mode 100755 tests/bugs/glusterd/bug-1090042.t create mode 100755 tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t create mode 100644 tests/bugs/glusterd/bug-1092841.t create mode 100755 tests/bugs/glusterd/bug-1095097.t create mode 100644 tests/bugs/glusterd/bug-1102656.t create mode 100644 tests/bugs/glusterd/bug-1104642.t create mode 100644 tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t create mode 100644 tests/bugs/glusterd/bug-1109770.t create mode 100644 tests/bugs/glusterd/bug-1109889.t create mode 100644 tests/bugs/glusterd/bug-1111041.t create mode 100755 tests/bugs/glusterd/bug-1112559.t create mode 100644 tests/bugs/glusterd/bug-1112613.t create mode 100644 tests/bugs/glusterd/bug-1113975.t create mode 100644 tests/bugs/glusterd/bug-1120647.t create mode 100644 tests/bugs/glusterd/bug-1140162-file-snapshot-and-features-encryption-option-validation.t create mode 100755 tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t create mode 100755 tests/bugs/glusterd/bug-765230.t create mode 100755 tests/bugs/glusterd/bug-782095.t create mode 100644 tests/bugs/glusterd/bug-824753-file-locker.c create mode 100755 tests/bugs/glusterd/bug-824753.t create mode 100644 tests/bugs/glusterd/bug-839595.t create mode 100644 tests/bugs/glusterd/bug-857330/common.rc create mode 100755 tests/bugs/glusterd/bug-857330/normal.t create mode 100755 tests/bugs/glusterd/bug-857330/xml.t create mode 100755 tests/bugs/glusterd/bug-859927.t create mode 100755 tests/bugs/glusterd/bug-862834.t create mode 100644 tests/bugs/glusterd/bug-878004.t create mode 100644 tests/bugs/glusterd/bug-888752.t create mode 100755 tests/bugs/glusterd/bug-889630.t create mode 100644 tests/bugs/glusterd/bug-905307.t create mode 100644 tests/bugs/glusterd/bug-913487.t create mode 100755 tests/bugs/glusterd/bug-913555.t create mode 100755 tests/bugs/glusterd/bug-916549.t create mode 100755 
tests/bugs/glusterd/bug-948686.t create mode 100644 tests/bugs/glusterd/bug-948729/bug-948729-force.t create mode 100644 tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t create mode 100644 tests/bugs/glusterd/bug-948729/bug-948729.t create mode 100644 tests/bugs/glusterd/bug-949930.t create mode 100755 tests/bugs/glusterd/bug-955588.t create mode 100644 tests/bugs/glusterd/bug-958790.t create mode 100644 tests/bugs/glusterd/bug-961669.t create mode 100755 tests/bugs/glusterd/bug-963541.t create mode 100755 tests/bugs/glusterd/bug-964059.t create mode 100644 tests/bugs/glusterd/bug-974007.t create mode 100755 tests/bugs/glusterfs-server/bug-852147.t create mode 100755 tests/bugs/glusterfs-server/bug-861542.t create mode 100755 tests/bugs/glusterfs-server/bug-864222.t create mode 100644 tests/bugs/glusterfs-server/bug-873549.t create mode 100755 tests/bugs/glusterfs-server/bug-877992.t create mode 100755 tests/bugs/glusterfs-server/bug-887145.t create mode 100644 tests/bugs/glusterfs-server/bug-889996.t create mode 100755 tests/bugs/glusterfs-server/bug-904300.t create mode 100644 tests/bugs/glusterfs-server/bug-905864.c create mode 100644 tests/bugs/glusterfs-server/bug-905864.t create mode 100755 tests/bugs/glusterfs-server/bug-912297.t create mode 100755 tests/bugs/glusterfs/bug-811493.t create mode 100755 tests/bugs/glusterfs/bug-844688.t create mode 100644 tests/bugs/glusterfs/bug-848251.t create mode 100755 tests/bugs/glusterfs/bug-853690.t create mode 100644 tests/bugs/glusterfs/bug-856455.t create mode 100644 tests/bugs/glusterfs/bug-860297.t create mode 100644 tests/bugs/glusterfs/bug-861015-index.t create mode 100644 tests/bugs/glusterfs/bug-861015-log.t create mode 100644 tests/bugs/glusterfs/bug-866459.t create mode 100644 tests/bugs/glusterfs/bug-867253.t create mode 100644 tests/bugs/glusterfs/bug-869724.t create mode 100755 tests/bugs/glusterfs/bug-872923.t create mode 100644 tests/bugs/glusterfs/bug-873962-spb.t create mode 100755 tests/bugs/glusterfs/bug-873962.t create mode 100755 tests/bugs/glusterfs/bug-879490.t create mode 100755 tests/bugs/glusterfs/bug-879494.t create mode 100755 tests/bugs/glusterfs/bug-892730.t create mode 100644 tests/bugs/glusterfs/bug-893338.t create mode 100755 tests/bugs/glusterfs/bug-893378.t create mode 100644 tests/bugs/glusterfs/bug-895235.t create mode 100755 tests/bugs/glusterfs/bug-896431.t create mode 100755 tests/bugs/glusterfs/bug-902610.t create mode 100644 tests/bugs/glusterfs/bug-906646.t create mode 100644 tests/bugs/glusterfs/getlk_owner.c create mode 100644 tests/bugs/io-cache/bug-858242.c create mode 100755 tests/bugs/io-cache/bug-858242.t create mode 100644 tests/bugs/libgfapi/bug-1032894.t create mode 100755 tests/bugs/logging/bug-823081.t create mode 100755 tests/bugs/nfs/bug-1053579.t create mode 100644 tests/bugs/nfs/bug-1116503.t create mode 100644 tests/bugs/nfs/bug-1157223-symlink-mounting.t create mode 100644 tests/bugs/nfs/bug-1161092-nfs-acls.t create mode 100755 tests/bugs/nfs/bug-847622.t create mode 100755 tests/bugs/nfs/bug-877885.t create mode 100755 tests/bugs/nfs/bug-904065.t create mode 100755 tests/bugs/nfs/bug-915280.t create mode 100755 tests/bugs/nfs/bug-970070.t create mode 100755 tests/bugs/nfs/bug-974972.t delete mode 100755 tests/bugs/overlap.py create mode 100644 tests/bugs/posix/bug-1034716.t create mode 100755 tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t create mode 100644 tests/bugs/posix/bug-765380.t create mode 100755 tests/bugs/posix/bug-990028.t create mode 100755 
tests/bugs/protocol/bug-762989.t create mode 100755 tests/bugs/protocol/bug-808400-dist.t create mode 100644 tests/bugs/protocol/bug-808400-fcntl.c create mode 100644 tests/bugs/protocol/bug-808400-flock.c create mode 100755 tests/bugs/protocol/bug-808400-repl.t create mode 100755 tests/bugs/protocol/bug-808400-stripe.t create mode 100755 tests/bugs/protocol/bug-808400.t create mode 100644 tests/bugs/quick-read/bug-846240.t create mode 100644 tests/bugs/quota/afr-quota-xattr-mdata-heal.t create mode 100644 tests/bugs/quota/bug-1023974.t create mode 100644 tests/bugs/quota/bug-1035576.t create mode 100644 tests/bugs/quota/bug-1038598.t create mode 100755 tests/bugs/quota/bug-1040423.t create mode 100755 tests/bugs/quota/bug-1049323.t create mode 100644 tests/bugs/quota/bug-1087198.t create mode 100644 tests/bugs/quota/bug-1100050.t create mode 100755 tests/bugs/quota/bug-1104692.t create mode 100755 tests/bugs/rdma/bug-765473.t create mode 100644 tests/bugs/replicate/886998/strict-readdir.t create mode 100755 tests/bugs/replicate/bug-1015990-rep.t create mode 100755 tests/bugs/replicate/bug-1015990.t create mode 100644 tests/bugs/replicate/bug-1032927.t create mode 100755 tests/bugs/replicate/bug-1037501.t create mode 100755 tests/bugs/replicate/bug-1046624.t create mode 100644 tests/bugs/replicate/bug-1058797.t create mode 100644 tests/bugs/replicate/bug-1101647.t create mode 100644 tests/bugs/replicate/bug-1130892.t create mode 100644 tests/bugs/replicate/bug-1132102.t create mode 100644 tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t create mode 100644 tests/bugs/replicate/bug-1139230.t create mode 100644 tests/bugs/replicate/bug-765564.t create mode 100755 tests/bugs/replicate/bug-767585-gfid.t create mode 100755 tests/bugs/replicate/bug-802417.t create mode 100644 tests/bugs/replicate/bug-821056.t create mode 100755 tests/bugs/replicate/bug-830665.t create mode 100755 tests/bugs/replicate/bug-853680.t create mode 100755 tests/bugs/replicate/bug-859581.t create mode 100755 tests/bugs/replicate/bug-865825.t create mode 100644 tests/bugs/replicate/bug-880898.t create mode 100644 tests/bugs/replicate/bug-884328.t create mode 100644 tests/bugs/replicate/bug-886998.t create mode 100644 tests/bugs/replicate/bug-888174.t create mode 100644 tests/bugs/replicate/bug-913051.t create mode 100644 tests/bugs/replicate/bug-916226.t create mode 100644 tests/bugs/replicate/bug-918437-sh-mtime.t create mode 100644 tests/bugs/replicate/bug-921231.t create mode 100644 tests/bugs/replicate/bug-957877.t create mode 100644 tests/bugs/replicate/bug-966018.t create mode 100644 tests/bugs/replicate/bug-976800.t create mode 100755 tests/bugs/replicate/bug-977797.t create mode 100644 tests/bugs/replicate/bug-978794.t create mode 100755 tests/bugs/replicate/bug-979365.t create mode 100755 tests/bugs/replicate/bug-986905.t create mode 100755 tests/bugs/rpc/bug-1043886.t create mode 100755 tests/bugs/rpc/bug-847624.t create mode 100644 tests/bugs/rpc/bug-884452.t create mode 100755 tests/bugs/rpc/bug-921072.t create mode 100755 tests/bugs/rpc/bug-954057.t create mode 100755 tests/bugs/snapshot/bug-1045333.t create mode 100755 tests/bugs/snapshot/bug-1049834.t create mode 100644 tests/bugs/snapshot/bug-1064768.t create mode 100644 tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t create mode 100755 tests/bugs/snapshot/bug-1157991.t create mode 100755 tests/bugs/snapshot/bug-1162462.t create mode 100644 tests/bugs/snapshot/bug-1162498.t create mode 100644 
tests/bugs/snapshot/bug-1164613.t create mode 100755 tests/bugs/snapshot/bug-1166197.t create mode 100644 tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t create mode 100644 tests/bugs/snapshot/bug-1178079.t create mode 100644 tests/bugs/stripe/bug-1002207.t create mode 100644 tests/bugs/stripe/bug-1111454.t create mode 100755 tests/bugs/trace/bug-797171.t create mode 100755 tests/bugs/transport/bug-873367.t create mode 100644 tests/bugs/unclassified/bug-1034085.t create mode 100644 tests/bugs/unclassified/bug-874498.t create mode 100644 tests/bugs/unclassified/bug-991622.t create mode 100644 tests/bugs/write-behind/bug-1058663.c create mode 100644 tests/bugs/write-behind/bug-1058663.t (limited to 'tests/bugs') diff --git a/tests/bugs/859927/repl.t b/tests/bugs/859927/repl.t deleted file mode 100755 index 0094f43dfca..00000000000 --- a/tests/bugs/859927/repl.t +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../../include.rc -. $(dirname $0)/../../volume.rc -cleanup; - -TEST glusterd; -TEST pidof glusterd - -#Tests for data-self-heal-algorithm option -function create_setup_for_self_heal { - file=$1 - kill_brick $V0 $H0 $B0/${V0}1 - dd of=$file if=/dev/urandom bs=1024k count=1 2>&1 > /dev/null - $CLI volume start $V0 force -} - -function test_write { - dd of=$M0/a if=/dev/urandom bs=1k count=1 2>&1 > /dev/null -} - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume set $V0 performance.stat-prefetch off -TEST $CLI volume set $V0 client-log-level DEBUG -TEST $CLI volume set $V0 cluster.background-self-heal-count 0 -TEST $CLI volume start $V0 -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0; - -touch $M0/a - -TEST $CLI volume set $V0 cluster.data-self-heal-algorithm full -EXPECT full volume_option $V0 cluster.data-self-heal-algorithm -create_setup_for_self_heal $M0/a -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -cat $file 2>&1 > /dev/null -TEST cmp $B0/${V0}1/a $B0/${V0}2/a - -TEST $CLI volume set $V0 cluster.data-self-heal-algorithm diff -EXPECT diff volume_option $V0 cluster.data-self-heal-algorithm -create_setup_for_self_heal $M0/a -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -cat $file 2>&1 > /dev/null -TEST cmp $B0/${V0}1/a $B0/${V0}2/a - -TEST $CLI volume reset $V0 cluster.data-self-heal-algorithm -create_setup_for_self_heal $M0/a -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -cat $file 2>&1 > /dev/null -TEST cmp $B0/${V0}1/a $B0/${V0}2/a - -TEST ! $CLI volume set $V0 cluster.data-self-heal-algorithm "" - -#Tests for quorum-type option -TEST ! $CLI volume set $V0 cluster.quorum-type "" -TEST $CLI volume set $V0 cluster.quorum-type fixed -EXPECT fixed volume_option $V0 cluster.quorum-type -TEST $CLI volume set $V0 cluster.quorum-count 2 -kill_brick $V0 $H0 $B0/${V0}1 -TEST ! test_write -TEST $CLI volume set $V0 cluster.quorum-type auto -EXPECT auto volume_option $V0 cluster.quorum-type -TEST ! test_write -TEST $CLI volume set $V0 cluster.quorum-type none -EXPECT none volume_option $V0 cluster.quorum-type -TEST test_write -TEST $CLI volume reset $V0 cluster.quorum-type -TEST test_write -cleanup; diff --git a/tests/bugs/886998/strict-readdir.t b/tests/bugs/886998/strict-readdir.t deleted file mode 100644 index 57a8c1c32dc..00000000000 --- a/tests/bugs/886998/strict-readdir.t +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../../include.rc -. 
$(dirname $0)/../../volume.rc - -function num_files_in_dir { - d=$1 - ls $d | sort | uniq | wc -l -} - -#Basic sanity tests for readdir functionality -cleanup; -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/r2d2_0 $H0:$B0/r2d2_1 $H0:$B0/r2d2_2 $H0:$B0/r2d2_3 -TEST $CLI volume start $V0 -TEST glusterfs --volfile-server=$H0 --volfile-id=/$V0 $M0 - -TEST touch $M0/{1..100} -EXPECT "100" num_files_in_dir $M0 - -TEST kill_brick $V0 $H0 $B0/r2d2_0 -TEST kill_brick $V0 $H0 $B0/r2d2_2 -EXPECT "100" num_files_in_dir $M0 - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2 - -TEST kill_brick $V0 $H0 $B0/r2d2_1 -TEST kill_brick $V0 $H0 $B0/r2d2_3 -EXPECT "100" num_files_in_dir $M0 - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 3 - -TEST $CLI volume set $V0 cluster.strict-readdir on -EXPECT "on" volinfo_field $V0 cluster.strict-readdir -TEST kill_brick $V0 $H0 $B0/r2d2_0 -TEST kill_brick $V0 $H0 $B0/r2d2_2 -EXPECT "100" num_files_in_dir $M0 - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2 - -TEST kill_brick $V0 $H0 $B0/r2d2_1 -TEST kill_brick $V0 $H0 $B0/r2d2_3 -EXPECT "100" num_files_in_dir $M0 -cleanup; diff --git a/tests/bugs/949327.t b/tests/bugs/949327.t deleted file mode 100644 index b8ac995f246..00000000000 --- a/tests/bugs/949327.t +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -function tmp_file_count() -{ - echo $(ls -lh /tmp/tmp.* 2>/dev/null | wc -l) -} - - -old_count=$(tmp_file_count); -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 -TEST $CLI volume start $V0 -new_count=$(tmp_file_count); - -TEST [ "$old_count" -eq "$new_count" ] - -cleanup diff --git a/tests/bugs/access-control/bug-1051896.c b/tests/bugs/access-control/bug-1051896.c new file mode 100644 index 00000000000..27aa1559453 --- /dev/null +++ b/tests/bugs/access-control/bug-1051896.c @@ -0,0 +1,97 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int do_setfacl(const char *path, const char *options, const char *textacl) +{ + int r; + int type; + acl_t acl; + int dob; + int dok; + int dom; + struct stat st; + char textmode[30]; + + r = 0; + dob = strchr(options, 'b') != (char *)NULL; + dok = strchr(options, 'k') != (char *)NULL; + dom = strchr(options, 'm') != (char *)NULL; + if ((dom && !textacl) + || (!dom && (textacl || (!dok && !dob) || + strchr(options, 'd')))) { + errno = EBADRQC; /* "bad request" */ + r = -1; + } else { + if (dob || dok) { + r = acl_delete_def_file(path); + } + if (dob && !r) { + if (!stat(path, &st)) { + sprintf(textmode, + "u::%c%c%c,g::%c%c%c,o::%c%c%c", + (st.st_mode & 0400 ? 'r' : '-'), + (st.st_mode & 0200 ? 'w' : '-'), + (st.st_mode & 0100 ? 'x' : '-'), + (st.st_mode & 0040 ? 'r' : '-'), + (st.st_mode & 0020 ? 'w' : '-'), + (st.st_mode & 0010 ? 'x' : '-'), + (st.st_mode & 004 ? 'r' : '-'), + (st.st_mode & 002 ? 'w' : '-'), + (st.st_mode & 001 ? 
'x' : '-')); + acl = acl_from_text(textmode); + if (acl) { + r = acl_set_file(path, + ACL_TYPE_ACCESS, acl); + acl_free(acl); + } else + r = -1; + } else + r = -1; + } + if (!r && dom) { + if (strchr(options, 'd')) + type = ACL_TYPE_DEFAULT; + else + type = ACL_TYPE_ACCESS; + acl = acl_from_text(textacl); + if (acl) { + r = acl_set_file(path, type, acl); + acl_free(acl); + } else + r = -1; + } + } + if (r) + r = -errno; + return r; +} + + +int main(int argc, char *argv[]) +{ + int rc = 0; + + if (argc != 4) { + fprintf(stderr, + "usage: ./setfacl_test \n"); + return 0; + } + rc = do_setfacl(argv[1], argv[2], argv[3]); + if (rc != 0) { + fprintf(stderr, "do_setfacl failed: %s\n", strerror(errno)); + return rc; + } + return 0; +} diff --git a/tests/bugs/access-control/bug-1051896.t b/tests/bugs/access-control/bug-1051896.t new file mode 100644 index 00000000000..870ede7db21 --- /dev/null +++ b/tests/bugs/access-control/bug-1051896.t @@ -0,0 +1,34 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +case $OSTYPE in +NetBSD) + echo "Skip test on ACL which are not available on NetBSD" >&2 + SKIP_TESTS + exit 0 + ;; +*) + ;; +esac + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; +TEST $CLI volume start $V0; + +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --acl -s $H0 --volfile-id $V0 $M0; + +TEST touch $M0/file1; + +TEST $CC $(dirname $0)/bug-1051896.c -o $(dirname $0)/bug-1051896 -lacl +TEST ! $(dirname $0)/bug-1051896 $M0/file1 m 'u::r,u::w,g::r--,o::r--' +TEST rm -f $(dirname $0)/bug-1051896 + +cleanup diff --git a/tests/bugs/access-control/bug-887098-gmount-crash.t b/tests/bugs/access-control/bug-887098-gmount-crash.t new file mode 100644 index 00000000000..ba9937bd5bf --- /dev/null +++ b/tests/bugs/access-control/bug-887098-gmount-crash.t @@ -0,0 +1,42 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST glusterfs -s $H0 --volfile-id=$V0 --acl $M0 +MOUNT_PID=$(get_mount_process_pid $V0) + +for i in {1..25}; +do + mkdir $M0/tmp_$i && cat /etc/hosts > $M0/tmp_$i/file + cp -RPp $M0/tmp_$i $M0/newtmp_$i && cat /etc/hosts > $M0/newtmp_$i/newfile +done + +EXPECT "$MOUNT_PID" get_mount_process_pid $V0 +TEST rm -rf $M0/* +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; + +cleanup; diff --git a/tests/bugs/access-control/bug-958691.t b/tests/bugs/access-control/bug-958691.t new file mode 100644 index 00000000000..9db858da2d2 --- /dev/null +++ b/tests/bugs/access-control/bug-958691.t @@ -0,0 +1,49 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. 
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+# Tests for the fuse mount
+TEST mkdir $M0/dir;
+TEST chmod 1777 $M0/dir;
+TEST touch $M0/dir/file{1,2};
+
+TEST $CLI volume set $V0 server.root-squash enable;
+
+mv $M0/dir/file1 $M0/dir/file11 2>/dev/null;
+TEST [ $? -ne 0 ];
+
+TEST $CLI volume set $V0 server.root-squash disable;
+TEST rm -rf $M0/dir;
+
+sleep 1;
+
+# tests for nfs mount
+TEST mkdir $N0/dir;
+TEST chmod 1777 $N0/dir;
+TEST touch $N0/dir/file{1,2};
+
+TEST $CLI volume set $V0 server.root-squash enable;
+
+mv $N0/dir/file1 $N0/dir/file11 2>/dev/null;
+TEST [ $? -ne 0 ];
+
+TEST $CLI volume set $V0 server.root-squash disable;
+TEST rm -rf $N0/dir;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/afr-quota-xattr-mdata-heal.t b/tests/bugs/afr-quota-xattr-mdata-heal.t
deleted file mode 100644
index 5291c464581..00000000000
--- a/tests/bugs/afr-quota-xattr-mdata-heal.t
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
-TEST $CLI volume quota $V0 enable
-TEST $CLI volume quota $V0 limit-usage / 1MB
-TEST mkdir $M0/d
-TEST $CLI volume quota $V0 limit-usage /d 1MB
-TEST touch $M0/d/a
-echo abc > $M0/d/a
-#Set the acl xattrs directly on backend, for some reason on mount it gives error
-acl_access_val="0x0200000001000600ffffffff04000400ffffffff10000400ffffffff20000400ffffffff"
-acl_file_val="0x0000000400000001ffffffff0006000000000004ffffffff0004000000000010ffffffff0004000000000020ffffffff00040000"
-TEST setfattr -n system.posix_acl_access -v $acl_access_val $B0/${V0}0/d
-TEST setfattr -n trusted.SGI_ACL_FILE -v $acl_file_val $B0/${V0}0/d
-TEST setfattr -n system.posix_acl_access -v $acl_access_val $B0/${V0}1/d
-TEST setfattr -n trusted.SGI_ACL_FILE -v $acl_file_val $B0/${V0}1/d
-TEST setfattr -n trusted.foo -v "baz" $M0/d
-TEST setfattr -n trusted.foo -v "baz" $M0/d/a
-TEST setfattr -n trusted.foo1 -v "baz1" $M0/d
-TEST setfattr -n trusted.foo1 -v "baz1" $M0/d/a
-TEST setfattr -n trusted.foo3 -v "unchanged" $M0/d
-TEST setfattr -n trusted.foo3 -v "unchanged" $M0/d/a
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-#Induce metadata self-heal
-TEST setfattr -n trusted.foo -v "bar" $M0/d
-TEST setfattr -n trusted.foo -v "bar" $M0/d/a
-TEST setfattr -x trusted.foo1 $M0/d
-TEST setfattr -x trusted.foo1 $M0/d/a
-TEST setfattr -n trusted.foo2 -v "bar2" $M0/d
-TEST setfattr -n trusted.foo2 -v "bar2" $M0/d/a
-d_quota_contri=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.*.contri")
-d_quota_dirty=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.dirty")
-d_quota_limit=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.limit-set")
-d_quota_size=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.size")
-
-a_pgfid=$(getfattr -d -m . 
-e hex $B0/${V0}1/d/a | grep -E "trusted.pgfid.") - -#Change internal xattrs in the backend, later check that they are not healed -TEST setfattr -n trusted.glusterfs.quota.00000000-0000-0000-0000-000000000001.contri -v 0x0000000000000400 $B0/${V0}0/d -TEST setfattr -n trusted.glusterfs.quota.dirty -v 0x0000000000000400 $B0/${V0}0/d -TEST setfattr -n trusted.glusterfs.quota.limit-set -v 0x0000000000000400 $B0/${V0}0/d #This will be healed, this is external xattr -TEST setfattr -n trusted.glusterfs.quota.size -v 0x0000000000000400 $B0/${V0}0/d -TEST setfattr -n $(echo $a_pgfid | cut -f1 -d'=') -v "orphan" $B0/${V0}0/d/a - -TEST $CLI volume set $V0 cluster.self-heal-daemon on -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0 - -#Check external xattrs match -EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo) -EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo) -TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d -TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d/a -EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo3) -EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo3) -EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo2) -EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo2) -EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}0/d | grep "trusted.glusterfs.quota.limit-set") - -EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo) -EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo) -TEST ! getfattr -n trusted.foo1 $B0/${V0}1/d -TEST ! getfattr -n trusted.foo1 $B0/${V0}1/d/a -EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo3) -EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo3) -EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo2) -EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo2) -EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep "trusted.glusterfs.quota.limit-set") - -#Test that internal xattrs on B0 are not healed -EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.00000000-0000-0000-0000-000000000001.contri) -EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.dirty) -EXPECT "$d_quota_limit" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.limit-set) #This will be healed, this is external xattr -EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.size) -EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep system.posix_acl_access) -EXPECT "$acl_file_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.SGI_ACL_FILE) -EXPECT "orphan" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep $(echo $a_pgfid | cut -f1 -d'=')) - -#Test that xattrs didn't go bad in source -EXPECT "$d_quota_contri" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.*.contri") -EXPECT "$d_quota_dirty" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.dirty") -EXPECT "$d_quota_limit" echo $(getfattr -d -m . 
-e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.limit-set") -EXPECT "$d_quota_size" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.size") -EXPECT "$a_pgfid" echo $(getfattr -d -m . -e hex $B0/${V0}1/d/a | grep -E "trusted.pgfid.") -EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}1/d | grep system.posix_acl_access) -EXPECT "$acl_file_val" echo $(getfattr -d -m. -e hex $B0/${V0}1/d | grep trusted.SGI_ACL_FILE) - -#Do a lookup and it shouldn't trigger metadata self-heal and heal xattrs -EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo) -EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo) -TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d -TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d/a -EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo3) -EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo3) -EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo2) -EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo2) -EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}0/d | grep "trusted.glusterfs.quota.limit-set") - -EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo) -EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo) -TEST ! getfattr -n trusted.foo1 $B0/${V0}1/d -TEST ! getfattr -n trusted.foo1 $B0/${V0}1/d/a -EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo3) -EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo3) -EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo2) -EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo2) -EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep "trusted.glusterfs.quota.limit-set") - -#Test that internal xattrs on B0 are not healed -EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.00000000-0000-0000-0000-000000000001.contri) -EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.dirty) -EXPECT "$d_quota_limit" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.limit-set) #This will be healed, this is external xattr -EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.size) -EXPECT "orphan" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep $(echo $a_pgfid | cut -f1 -d'=')) - -#Test that xattrs didn't go bad in source -EXPECT "$d_quota_contri" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.*.contri") -EXPECT "$d_quota_dirty" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.dirty") -EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.limit-set") -EXPECT "$d_quota_size" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.size") -EXPECT "$a_pgfid" echo $(getfattr -d -m . -e hex $B0/${V0}1/d/a | grep -E "trusted.pgfid.") - -EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep system.posix_acl_access) -EXPECT "$acl_file_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.SGI_ACL_FILE) -EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}1/d | grep system.posix_acl_access) -EXPECT "$acl_file_val" echo $(getfattr -d -m. 
-e hex $B0/${V0}1/d | grep trusted.SGI_ACL_FILE) -cleanup diff --git a/tests/bugs/brick-uid-reset-on-volume-restart.t b/tests/bugs/brick-uid-reset-on-volume-restart.t deleted file mode 100755 index 26c4d00e9ae..00000000000 --- a/tests/bugs/brick-uid-reset-on-volume-restart.t +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -function get_uid() { - stat -c '%u' $1; -} - -function get_gid() { - stat -c '%g' $1; -} - - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT '8' brick_count $V0 - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST glusterfs -s $H0 --volfile-id $V0 $M0; - -EXPECT 0 get_uid $M0; -EXPECT 0 get_gid $M0; - -TEST chown 100:101 $M0; - -EXPECT 100 get_uid $M0; -EXPECT 101 get_gid $M0; - -TEST $CLI volume stop $V0; -TEST $CLI volume start $V0; - -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 6 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 7 - -EXPECT 100 get_uid $M0; -EXPECT 101 get_gid $M0; - -cleanup; diff --git a/tests/bugs/bug-000000.t b/tests/bugs/bug-000000.t deleted file mode 100755 index 7f3d15c9d59..00000000000 --- a/tests/bugs/bug-000000.t +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd - -cleanup; diff --git a/tests/bugs/bug-1002207.t b/tests/bugs/bug-1002207.t deleted file mode 100644 index c67738bd7ec..00000000000 --- a/tests/bugs/bug-1002207.t +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id=$V0 $M0; -TEST dd if=/dev/zero of=$M0/file$i.data bs=1024 count=1024; - -function xattr_query_check() -{ - local path=$1 - local xa_name=$2 - - local ret=$(getfattr -n $xa_name $path 2>&1 | grep -o "$xa_name: No such attribute" | wc -l) - echo $ret -} - -function set_xattr() -{ - local path=$1 - local xa_name=$2 - local xa_val=$3 - - setfattr -n $xa_name -v $xa_val $path - echo $? -} - -EXPECT 0 set_xattr $M0/file$i.data "trusted.name" "testofafairlylongxattrstringthatbutnotlongenoughtofailmemoryallocation" -EXPECT 0 xattr_query_check $M0/file$i.data "trusted.name" - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! 
$CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-1002556.t b/tests/bugs/bug-1002556.t deleted file mode 100755 index a57f455d43f..00000000000 --- a/tests/bugs/bug-1002556.t +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume start $V0 -EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'; - -TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2 -EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'; - -TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}1 force -EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'; - -TEST killall glusterd -TEST glusterd - -EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'; -cleanup diff --git a/tests/bugs/bug-1004218.t b/tests/bugs/bug-1004218.t deleted file mode 100644 index 17eb3c65b12..00000000000 --- a/tests/bugs/bug-1004218.t +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Test if only a single xml document is generated by 'status all' -# when a volume is not started - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd - -TEST $CLI volume create ${V0}1 $H0:$B0/${V0}1{1,2} -TEST $CLI volume create ${V0}2 $H0:$B0/${V0}2{1,2} - -TEST $CLI volume start ${V0}1 - -function test_status_all () -{ - $CLI volume status all --xml | xmllint -format - -} - -TEST test_status_all - -TEST $CLI volume stop ${V0}1 - -cleanup diff --git a/tests/bugs/bug-1004744.t b/tests/bugs/bug-1004744.t deleted file mode 100644 index 6b1bb9d19f9..00000000000 --- a/tests/bugs/bug-1004744.t +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -#Test case: After a rebalance fix-layout, check if the rebalance status command -#displays the appropriate message at the CLI. - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -#Basic checks -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info - -#Create a 2x1 distributed volume -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; -TEST $CLI volume start $V0 - -# Mount FUSE and create file/directory -TEST glusterfs -s $H0 --volfile-id $V0 $M0 -for i in `seq 1 10`; -do - mkdir $M0/dir_$i - echo file>$M0/dir_$i/file_$i - for j in `seq 1 100`; - do - mkdir $M0/dir_$i/dir_$j - echo file>$M0/dir_$i/dir_$j/file_$j - done -done - -#add 2 bricks -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{3,4}; - -#perform rebalance fix-layout -TEST $CLI volume rebalance $V0 fix-layout start - -EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0; - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-1015990-rep.t b/tests/bugs/bug-1015990-rep.t deleted file mode 100755 index bca0d7aff07..00000000000 --- a/tests/bugs/bug-1015990-rep.t +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../afr.rc -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - - -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 - - - -TEST kill_brick $V0 $H0 $B0/$V0"1" -sleep 5 -TEST kill_brick $V0 $H0 $B0/$V0"3" -sleep 5 - -for i in {1..100}; do echo "STRING" > $M0/File$i; done - -brick_2_sh_entries=$(count_sh_entries $B0/$V0"2") -brick_4_sh_entries=$(count_sh_entries $B0/$V0"4") - -command_output=$(gluster volume heal $V0 statistics heal-count replica $H0:$B0/$V0"1") - - -substring="Number of entries:" -count=0 -while read -r line; -do - if [[ "$line" == *$substring* ]] - then - value=$(echo $line | cut -f 2 -d :) - count=$(($count + $value)) - fi - -done <<< "$command_output" - -brick_2_entries_count=$(($count-$value)) - -EXPECT "0" echo $brick_2_entries_count - -brick_2_entries_count=$count - - -xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2") -##Remove the count of the xattrop-gfid entry count as it does not contribute -##to the number of files to be healed - -sub_val=1 -xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val)) - -ret=0 -if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ] - then - ret=$(($ret + $sub_val)) -fi - -EXPECT "1" echo $ret -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0 - -cleanup; diff --git a/tests/bugs/bug-1015990.t b/tests/bugs/bug-1015990.t deleted file mode 100755 index 165af5168a0..00000000000 --- a/tests/bugs/bug-1015990.t +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../afr.rc -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - - -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 - - - -TEST kill_brick $V0 $H0 $B0/$V0"1" -sleep 5 -TEST kill_brick $V0 $H0 $B0/$V0"3" -sleep 5 - -for i in {1..100}; do echo "STRING" > $M0/File$i; done - -brick_2_sh_entries=$(count_sh_entries $B0/$V0"2") -brick_4_sh_entries=$(count_sh_entries $B0/$V0"4") - - -command_output=$(gluster volume heal $V0 statistics heal-count) - - -substring="Number of entries:" -count=0 -while read -r line; -do - if [[ "$line" == *$substring* ]] - then - value=$(echo $line | cut -f 2 -d :) - count=$(($count + $value)) - fi - -done <<< "$command_output" - -brick_2_entries_count=$(($count-$value)) -brick_4_entries_count=$value - - -xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2") -##Remove the count of the xattrop-gfid entry count as it does not contribute -##to the number of files to be healed - -sub_val=1 -xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val)) - -xattrop_count_brick_4=$(count_sh_entries $B0/$V0"4") -##Remove xattrop-gfid entry count - -xattrop_count_brick_4=$(($xattrop_count_brick_4-$sub_val)) - - -ret=0 -if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ] - then - ret=$(($ret + $sub_val)) -fi - -EXPECT "1" echo $ret - - -ret=0 -if [ "$xattrop_count_brick_4" -eq "$brick_4_entries_count" ] - then - ret=$(($ret + $sub_val)) -fi - -EXPECT "1" echo $ret - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0 - -cleanup; - diff --git a/tests/bugs/bug-1022055.t b/tests/bugs/bug-1022055.t deleted file mode 100755 index 07d0b1f2a6f..00000000000 --- a/tests/bugs/bug-1022055.t +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../cluster.rc - -function check_peers { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} - -cleanup; - -TEST launch_cluster 2; - -TEST $CLI_1 peer probe $H2; - -EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; - -TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0; - -TEST $CLI_1 volume start $V0; - -TEST $CLI_1 volume log rotate $V0; - -TEST $CLI_1 volume status; - -cleanup; diff --git a/tests/bugs/bug-1022905.t b/tests/bugs/bug-1022905.t deleted file mode 100644 index aef3395dd8a..00000000000 --- a/tests/bugs/bug-1022905.t +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -## Create a volume -TEST glusterd; -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/${V0}{1}; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Volume start -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Enable a protected and a resettable/unprotected option -TEST $CLI volume quota $V0 enable -TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG - -## Reset cmd resets only unprotected option(s), succeeds. -TEST $CLI volume reset $V0; - -## Reset should fail -TEST ! 
$CLI volume reset $V0; - -## Set an unprotected option -TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG - -## Now 1 protected and 1 unprotected options are set -## Reset force should succeed -TEST $CLI volume reset $V0 force; - -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 - -cleanup; diff --git a/tests/bugs/bug-1023974.t b/tests/bugs/bug-1023974.t deleted file mode 100644 index 63b2c557d24..00000000000 --- a/tests/bugs/bug-1023974.t +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# This regression test tries to ensure renaming a directory with content, and -# no limit set, is accounted properly, when moved into a directory with quota -# limit set. - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6}; -TEST $CLI volume start $V0; - -TEST $CLI volume quota $V0 enable; - -TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0; - -TEST mkdir -p $M0/1/2; -TEST $CLI volume quota $V0 limit-usage /1/2 100MB 70%; -TEST $CLI volume quota $V0 hard-timeout 0 -TEST $CLI volume quota $V0 soft-timeout 0 - -#The corresponding write(3) should fail with EDQUOT ("Disk quota exceeded") -TEST ! dd if=/dev/urandom of=$M0/1/2/file bs=1024k count=102; -TEST mkdir -p $M0/1/3; -TEST dd if=/dev/urandom of=$M0/1/3/file bs=1024k count=102; - -#The corresponding rename(3) should fail with EDQUOT ("Disk quota exceeded") -TEST ! mv $M0/1/3/ $M0/1/2/3_mvd; - -cleanup; diff --git a/tests/bugs/bug-1027171.t b/tests/bugs/bug-1027171.t deleted file mode 100644 index c1f4bd809fe..00000000000 --- a/tests/bugs/bug-1027171.t +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -#Test case: Do not allow commit if the bricks are not decommissioned - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -#Basic checks -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info - -#Create a Distributed volume -TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}; -TEST $CLI volume start $V0 - -#Remove bricks and commit without starting -function remove_brick_commit_status { - $CLI volume remove-brick $V0 \ - $H0:$B0/${V0}2 commit 2>&1 |grep -oE "success|decommissioned" -} -EXPECT "decommissioned" remove_brick_commit_status; - -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 -TEST ! $CLI volume info $V0 - -#Create a Distributed-Replicate volume -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..4}; -TEST $CLI volume start $V0 - -#Try to reduce replica count with start option -function remove_brick_start_status { - $CLI volume remove-brick $V0 replica 1 \ - $H0:$B0/${V0}1 $H0:$B0/${V0}3 start 2>&1 |grep -oE "success|failed" -} -EXPECT "failed" remove_brick_start_status; - -#Remove bricks with commit option -function remove_brick_commit_status2 { - $CLI volume remove-brick $V0 replica 1 \ - $H0:$B0/${V0}1 $H0:$B0/${V0}3 commit 2>&1 | - grep -oE "success|decommissioned" -} -EXPECT "decommissioned" remove_brick_commit_status2; - -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 -TEST ! $CLI volume info $V0 - -cleanup; diff --git a/tests/bugs/bug-1030208.t b/tests/bugs/bug-1030208.t deleted file mode 100644 index 1ab41070663..00000000000 --- a/tests/bugs/bug-1030208.t +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -#Test case: Hardlink test - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; - -#Basic checks -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info - -#Create a distributed volume -TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}; -TEST $CLI volume start $V0 - -# Mount FUSE -TEST glusterfs -s $H0 --volfile-id $V0 $M0 - -#Create a file and perform fop on a DIR -TEST touch $M0/foo -TEST ls $M0/ - -#Create hardlink -TEST ln $M0/foo $M0/bar - - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-1030580.t b/tests/bugs/bug-1030580.t deleted file mode 100644 index 1b03ae2f0cd..00000000000 --- a/tests/bugs/bug-1030580.t +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -function write_to_file { - dd of=$M0/1 if=/dev/zero bs=1024k count=128 oflag=append 2>&1 >/dev/null -} - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 -TEST $CLI volume start $V0 -TEST $CLI volume profile $V0 start -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 - -# Verify 'volume profile info' prints both cumulative and incremental stats -write_to_file & -wait -output=$($CLI volume profile $V0 info) -EXPECT 2 cumulative_stat_count "$output" -EXPECT 2 incremental_stat_count "$output" ' 0 ' - -# Verify 'volume profile info incremental' prints incremental stats only -write_to_file & -wait -output=$($CLI volume profile $V0 info incremental) -EXPECT 0 cumulative_stat_count "$output" -EXPECT 2 incremental_stat_count "$output" ' 1 ' - -# Verify 'volume profile info cumulative' prints cumulative stats only -write_to_file & -wait -output=$($CLI volume profile $V0 info cumulative) -EXPECT 2 cumulative_stat_count "$output" -EXPECT 0 incremental_stat_count "$output" '.*' - -# Verify the 'volume profile info cumulative' command above didn't alter -# the interval id -write_to_file & -wait -output=$($CLI volume profile $V0 info incremental) -EXPECT 0 cumulative_stat_count "$output" -EXPECT 2 incremental_stat_count "$output" ' 2 ' - -cleanup; diff --git a/tests/bugs/bug-1032894.t b/tests/bugs/bug-1032894.t deleted file mode 100644 index ecb5952d860..00000000000 --- a/tests/bugs/bug-1032894.t +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#Check stale indices are deleted as part of self-heal-daemon crawl. -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 -cd $M0 -TEST mkdir a -cd a -TEST kill_brick $V0 $H0 $B0/${V0}0 -# Create stale indices -for i in {1..10}; do echo abc > $i; done -for i in {1..10}; do rm -f $i; done - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -TEST $CLI volume set $V0 cluster.self-heal-daemon on -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status - -#Since maximum depth of the directory structure that needs healin is 2 -#Trigger two self-heals. 
That should make sure the heal is complete -TEST $CLI volume heal $V0 - -EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_index_count $B0/${V0}1 -cleanup diff --git a/tests/bugs/bug-1032927.t b/tests/bugs/bug-1032927.t deleted file mode 100644 index 2106f3d5bfe..00000000000 --- a/tests/bugs/bug-1032927.t +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#This tests if pathinfo getxattr fails when one of the bricks is down -#Lets hope it doesn't - -cleanup; -function get_pathinfo_in_loop { - failed=0 - for i in {1..1000} - do - getfattr -n trusted.glusterfs.pathinfo $M0 2>/dev/null - if [ $? -ne 0 ]; then failed=1;break; fi - done - return $failed -} - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -cd $M0 -TEST kill_brick $V0 $H0 $B0/${V0}1 - -#when one of the bricks is down getfattr of pathinfo should not fail -#Lets just do the test for 1000 times to see if we hit the race -TEST get_pathinfo_in_loop - -cleanup diff --git a/tests/bugs/bug-1034085.t b/tests/bugs/bug-1034085.t deleted file mode 100644 index 7ccb4403ea9..00000000000 --- a/tests/bugs/bug-1034085.t +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -#Test case: Check the creation of indices/xattrop dir as soon as brick comes up. - -. $(dirname $0)/../include.rc - -cleanup; - -#Create a volume -TEST glusterd; -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST mkdir -p $B0/${V0}-0/.glusterfs/indices/ -TEST touch $B0/${V0}-0/.glusterfs/indices/xattrop - -#Volume start should not work when xattrop dir not created -TEST ! $CLI volume start $V0; - -TEST rm $B0/${V0}-0/.glusterfs/indices/xattrop - -#Volume start should work now -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -#Check for the existence of indices/xattrop dir -TEST [ -d $B0/${V0}-0/.glusterfs/indices/xattrop/ ]; - -cleanup; diff --git a/tests/bugs/bug-1034716.t b/tests/bugs/bug-1034716.t deleted file mode 100644 index e6260fca8cb..00000000000 --- a/tests/bugs/bug-1034716.t +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -#Basic checks -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info - -#Create a distributed volume -TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}; -TEST $CLI volume start $V0 - -# Mount FUSE -TEST glusterfs -s $H0 --volfile-id $V0 $M0 - -#Create a file and perform fop on a DIR -TEST touch $M0/foo - -function xattr_query_check() { - local path=$1 - - local ret=`getfattr -m . -d $path 2>&1 | grep -c 'trusted.glusterfs'` - echo $ret -} - -function set_xattr() { - local path=$1 - local xa_name=$2 - local xa_val=$3 - - setfattr -n $xa_name -v $xa_val $path - echo $? -} - -function remove_xattr() { - local path=$1 - local xa_name=$2 - - setfattr -x $xa_name $path - echo $? -} - -EXPECT 0 xattr_query_check $M0/ -EXPECT 0 xattr_query_check $M0/foo - -EXPECT 1 set_xattr $M0/ 'trusted.glusterfs.volume-id' 'foo' -EXPECT 1 remove_xattr $M0/ 'trusted.glusterfs.volume-id' - - -## Finish up -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0; -TEST ! 
$CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-1035576.t b/tests/bugs/bug-1035576.t deleted file mode 100644 index 62d431a703a..00000000000 --- a/tests/bugs/bug-1035576.t +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#This script tests that self-heal of limit-set xattr is happening on a directory -#but self-heal of quota.size xattr is not happening - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume start $V0 -#Lets disable perf-xls so that lookup would reach afr -TEST $CLI volume set $V0 performance.quick-read off -TEST $CLI volume set $V0 performance.io-cache off -TEST $CLI volume set $V0 performance.write-behind off -TEST $CLI volume set $V0 performance.stat-prefetch off -TEST $CLI volume set $V0 performance.read-ahead off -TEST $CLI volume set $V0 background-self-heal-count 0 -TEST $CLI volume set $V0 self-heal-daemon off -TEST $CLI volume quota $V0 enable - -TEST kill_brick $V0 $H0 $B0/${V0}0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -cd $M0 -TEST mkdir $M0/a -TEST $CLI volume quota $V0 limit-usage /a 1GB -echo abc > $M0/a/f -$CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -quota_limit_val1=$(get_hex_xattr trusted.glusterfs.quota.limit-set $B0/${V0}1/a) -quota_size_val1=$(get_hex_xattr trusted.glusterfs.quota.size $B0/${V0}1/a) - -#Trigger entry,metadata self-heal -TEST ls $M0/a - -quota_limit_val0=$(get_hex_xattr trusted.glusterfs.quota.limit-set $B0/${V0}0/a) -quota_size_val0=$(get_hex_xattr trusted.glusterfs.quota.size $B0/${V0}0/a) - -#Test that limit-set xattr is healed -TEST [ $quota_limit_val0 == $quota_limit_val1 ] - -#Only entry, metadata self-heal is done quota size value should not be same -TEST [ $quota_size_val0 != $quota_size_val1 ] -TEST cat $M0/a/f - -#Now that data self-heal is done quota size value should be same -quota_size_val0=$(get_hex_xattr trusted.glusterfs.quota.size $B0/${V0}0/a) -TEST [ $quota_size_val0 == $quota_size_val1 ] -cleanup diff --git a/tests/bugs/bug-1037501.t b/tests/bugs/bug-1037501.t deleted file mode 100755 index aff3d6c28b7..00000000000 --- a/tests/bugs/bug-1037501.t +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -function write_file() -{ - path="$1"; shift - echo "$*" > "$path" -} - -cleanup; -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -## Start and create a volume -mkdir -p ${B0}/${V0}-0 -mkdir -p ${B0}/${V0}-1 -mkdir -p ${B0}/${V0}-2 -TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2} - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Mount native -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 - -TEST `echo "TEST-FILE" > $M0/File` -TEST `mkdir $M0/Dir` -TEST `ln $M0/File $M0/Link` -TEST `mknod $M0/FIFO p` - -TEST $CLI volume add-brick $V0 replica 4 $H0:$B0/$V0-3 force -TEST $CLI volume add-brick $V0 replica 5 $H0:$B0/$V0-4 force -TEST $CLI volume add-brick $V0 replica 6 $H0:$B0/$V0-5 force - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5 -TEST gluster volume heal $V0 full -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/File -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/File -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/File -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/File -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/File -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/File - -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/Link -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/Link -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/Link -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/Link -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/Link -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/Link - -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/Dir -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/Dir -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/Dir -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/Dir -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/Dir -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/Dir - -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/FIFO -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/FIFO -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/FIFO -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/FIFO -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/FIFO -EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/FIFO - -EXPECT 10 stat -c '%s' $B0/$V0-0/File -EXPECT 10 stat -c '%s' $B0/$V0-1/File -EXPECT 10 stat -c '%s' $B0/$V0-2/File -EXPECT 10 stat -c '%s' $B0/$V0-3/File -EXPECT 10 stat -c '%s' $B0/$V0-4/File -EXPECT 10 stat -c '%s' $B0/$V0-5/File - -EXPECT 3 stat -c '%h' $B0/$V0-0/Link -EXPECT 3 stat -c '%h' $B0/$V0-1/Link -EXPECT 3 stat -c '%h' $B0/$V0-2/Link -EXPECT 3 stat -c '%h' $B0/$V0-3/Link -EXPECT 3 stat -c '%h' $B0/$V0-4/Link -EXPECT 3 stat -c '%h' $B0/$V0-5/Link - -EXPECT 'directory' stat -c '%F' $B0/$V0-0/Dir -EXPECT 'directory' stat -c '%F' $B0/$V0-1/Dir -EXPECT 'directory' stat -c '%F' $B0/$V0-2/Dir -EXPECT 'directory' stat -c '%F' $B0/$V0-3/Dir -EXPECT 'directory' stat -c '%F' $B0/$V0-4/Dir -EXPECT 'directory' stat -c '%F' $B0/$V0-5/Dir - -EXPECT 'fifo' stat -c '%F' $B0/$V0-0/FIFO -EXPECT 'fifo' stat -c '%F' $B0/$V0-1/FIFO -EXPECT 'fifo' stat -c 
'%F' $B0/$V0-2/FIFO -EXPECT 'fifo' stat -c '%F' $B0/$V0-3/FIFO -EXPECT 'fifo' stat -c '%F' $B0/$V0-4/FIFO -EXPECT 'fifo' stat -c '%F' $B0/$V0-5/FIFO - -cleanup; diff --git a/tests/bugs/bug-1038598.t b/tests/bugs/bug-1038598.t deleted file mode 100644 index 4846b7dc78a..00000000000 --- a/tests/bugs/bug-1038598.t +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; - -function hard_limit() -{ - local QUOTA_PATH=$1; - $CLI volume quota $V0 list $QUOTA_PATH | grep "$QUOTA_PATH" | awk '{print $2}' -} - -function soft_limit() -{ - local QUOTA_PATH=$1; - $CLI volume quota $V0 list $QUOTA_PATH | grep "$QUOTA_PATH" | awk '{print $3}' -} - -function usage() -{ - local QUOTA_PATH=$1; - $CLI volume quota $V0 list $QUOTA_PATH | grep "$QUOTA_PATH" | awk '{print $4}' -} - -function sl_exceeded() -{ - local QUOTA_PATH=$1; - $CLI volume quota $V0 list $QUOTA_PATH | grep "$QUOTA_PATH" | awk '{print $6}' -} - -function hl_exceeded() -{ - local QUOTA_PATH=$1; - $CLI volume quota $V0 list $QUOTA_PATH | grep "$QUOTA_PATH" | awk '{print $7}' - -} - -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT '2' brick_count $V0 - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST $CLI volume quota $V0 enable -sleep 5 - -TEST glusterfs -s $H0 --volfile-id $V0 $M0; - -TEST mkdir -p $M0/test_dir -TEST $CLI volume quota $V0 limit-usage /test_dir 10MB 50 - -EXPECT "10.0MB" hard_limit "/test_dir"; -EXPECT "50%" soft_limit "/test_dir"; - -TEST dd if=/dev/zero of=$M0/test_dir/file1.txt bs=1024k count=4 -EXPECT "4.0MB" usage "/test_dir"; -EXPECT 'No' sl_exceeded "/test_dir"; -EXPECT 'No' hl_exceeded "/test_dir"; - -TEST dd if=/dev/zero of=$M0/test_dir/file1.txt bs=1024k count=6 -EXPECT "6.0MB" usage "/test_dir"; -EXPECT 'Yes' sl_exceeded "/test_dir"; -EXPECT 'No' hl_exceeded "/test_dir"; - -#set timeout to 0 so that quota gets enforced without any lag -TEST $CLI volume set $V0 features.hard-timeout 0 -TEST $CLI volume set $V0 features.soft-timeout 0 - -TEST ! dd if=/dev/zero of=$M0/test_dir/file1.txt bs=1024k count=15 -EXPECT 'Yes' sl_exceeded "/test_dir"; -EXPECT 'Yes' hl_exceeded "/test_dir"; - -cleanup; diff --git a/tests/bugs/bug-1040408.t b/tests/bugs/bug-1040408.t deleted file mode 100644 index 2982d6a81c0..00000000000 --- a/tests/bugs/bug-1040408.t +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -#Test case: Create a distributed replicate volume, and reduce -#replica count - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -#Basic checks -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info - -#Create a 2X3 distributed-replicate volume -TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..6}; -TEST $CLI volume start $V0 - -# Reduce to 2x2 volume by specifying bricks in reverse order -function remove_brick_status { - $CLI volume remove-brick $V0 replica 2 \ - $H0:$B0/${V0}6 $H0:$B0/${V0}3 force 2>&1 |grep -oE "success|failed" -} -EXPECT "success" remove_brick_status; - -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-1040423.t b/tests/bugs/bug-1040423.t deleted file mode 100755 index 6dae9eac6e9..00000000000 --- a/tests/bugs/bug-1040423.t +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup - -function _init() { -# Start glusterd -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -# Lets create volume -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -#Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; -TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 - -#Enable Quota -TEST $CLI volume quota $V0 enable - -#As quotad consumes some time to connect to brick process we invoke sleep -sleep 10; - -#set limit of 1GB of quota on root -TEST $CLI volume quota $V0 limit-usage / 1GB -} - -function get_hardlimit() -{ - VOLUME=$1 - - $CLI volume quota $VOLUME list | tail -1 | sed "s/ \{1,\}/ /g" | - cut -d' ' -f 2 -} - -function check_fattrs { - -touch $M0/file1; - -#This confirms that pgfid is also filtered -TEST ! "getfattr -d -e hex -m . $M0/file1 | grep pgfid "; - -#just check for quota xattr are visible or not -TEST ! "getfattr -d -e hex -m . $M0 | grep quota"; - -#setfattr should fail -TEST ! setfattr -n trusted.glusterfs.quota.limit-set -v 10 $M0; - -#remove xattr should fail -TEST ! setfattr -x trusted.glusterfs.quota.limit-set $M0; - -#check if list command still shows the correct value or not - -EXPECT "1.0GB" get_hardlimit $V0 - -} - -_init; -check_fattrs; -cleanup - - - - diff --git a/tests/bugs/bug-1042725.t b/tests/bugs/bug-1042725.t deleted file mode 100644 index a954019d004..00000000000 --- a/tests/bugs/bug-1042725.t +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -#Basic checks -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info - -#Create a distributed volume -TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}; -TEST $CLI volume start $V0 - -# Mount FUSE -TEST glusterfs -s $H0 --volfile-id $V0 $M0 - -#Create files -TEST mkdir $M0/foo -TEST touch $M0/foo/{1..20} -for file in {1..20}; do - ln $M0/foo/$file $M0/foo/${file}_linkfile; -done - -#Stop one of the brick -TEST kill_brick ${V0} ${H0} ${B0}/${V0}1 - -rm -rf $M0/foo 2>/dev/null -TEST stat $M0/foo - -touch $M0/foo/{1..20} 2>/dev/null -touch $M0/foo/{1..20}_linkfile 2>/dev/null - -TEST $CLI volume start $V0 force; -sleep 5 -function verify_duplicate { - count=`ls $M0/foo | sort | uniq --repeated | grep [0-9] -c` - echo $count -} -EXPECT 0 verify_duplicate - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-1043886.t b/tests/bugs/bug-1043886.t deleted file mode 100755 index 710fcd2064d..00000000000 --- a/tests/bugs/bug-1043886.t +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../nfs.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; -TEST $CLI volume start $V0 - -## Mount FUSE with caching disabled -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; - -## Mount volume as NFS export -TEST mount_nfs $H0:/$V0 $N0 nolock; - -# just a random uid/gid -uid=22162 -gid=5845 - -mkdir $N0/other; -chown $uid:$gid $N0/other; - -TEST $CLI volume set $V0 server.root-squash on; -TEST $CLI volume set $V0 server.anonuid $uid; -TEST $CLI volume set $V0 server.anongid $gid; - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; - -# create files and directories in the root of the glusterfs and nfs mount -# which is owned by root and hence the right behavior is getting EACCESS -# as the fops are executed as nfsnobody. -touch $M0/file 2>/dev/null; -TEST [ $? -ne 0 ] -mkdir $M0/dir 2>/dev/null; -TEST [ $? -ne 0 ] - -# Here files and directories should be getting created as other directory is owned -# by tmp_user as server.anonuid and server.anongid have the value of tmp_user uid and gid -TEST touch $M0/other/file 2>/dev/null; -TEST [ "$(stat -c %u:%g $N0/other/file)" = "$uid:$gid" ]; -TEST mkdir $M0/other/dir 2>/dev/null; -TEST [ "$(stat -c %u:%g $N0/other/dir)" = "$uid:$gid" ]; - -## Before killing daemon to avoid deadlocks -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-1045333.t b/tests/bugs/bug-1045333.t deleted file mode 100755 index d877d14c204..00000000000 --- a/tests/bugs/bug-1045333.t +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../snapshot.rc - -cleanup; -TEST verify_lvm_version; -TEST glusterd; -TEST pidof glusterd; - -TEST setup_lvm 1 - -TEST $CLI volume create $V0 $H0:$L1 -TEST $CLI volume start $V0 - - -S1="${V0}-snap1" #Create snapshot with name contains hyphen(-) -S2="-${V0}-snap2" #Create snapshot with name starts with hyphen(-) -#Create snapshot with a long name -S3="${V0}_single_gluster_volume_is_accessible_by_multiple_clients_offline_snapshot_is_a_long_name" - -TEST $CLI snapshot create $S1 $V0 -TEST snapshot_exists 0 $S1 - -TEST $CLI snapshot create $S2 $V0 -TEST snapshot_exists 0 $S2 - -TEST $CLI snapshot create $S3 $V0 -TEST snapshot_exists 0 $S3 - - -TEST glusterfs -s $H0 --volfile-id=/snaps/$S1/$V0 $M0 -TEST glusterfs -s $H0 --volfile-id=/snaps/$S2/$V0 $M1 -TEST glusterfs -s $H0 --volfile-id=/snaps/$S3/$V0 $M2 - -#Clean up -#TEST $CLI snapshot delete $S1 -#TEST $CLI snapshot delete $S2 -#TEST $CLI snapshot delete $S3 - -TEST $CLI volume stop $V0 force -#TEST $CLI volume delete $V0 - -cleanup; diff --git a/tests/bugs/bug-1046308.t b/tests/bugs/bug-1046308.t deleted file mode 100644 index cfec3a35d31..00000000000 --- a/tests/bugs/bug-1046308.t +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -volname="StartMigrationDuringRebalanceTest" -TEST glusterd -TEST pidof glusterd; - -TEST $CLI volume info; -TEST $CLI volume create $volname $H0:$B0/${volname}{1,2}; -TEST $CLI volume start $volname; -TEST $CLI volume rebalance $volname start; - -cleanup; - - - diff --git a/tests/bugs/bug-1046624.t b/tests/bugs/bug-1046624.t deleted file mode 100755 index 13ee45d2911..00000000000 --- a/tests/bugs/bug-1046624.t +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; -TEST glusterd -TEST pidof glusterd - -## Start and create a volume -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - - -## Make sure automatic self-heal doesn't perturb our results. -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume set $V0 stat-prefetch off -TEST $CLI volume set $V0 background-self-heal-count 0 - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Mount native -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 --use-readdirp=no - -TEST `echo "TEST-FILE" > $M0/File` -TEST `mkdir $M0/Dir` -TEST kill_brick $V0 $H0 $B0/${V0}-0 - -TEST `ln -s $M0/File $M0/Link1` -TEST `ln -s $M0/Dir $M0/Link2` - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 - -TEST `find $M0/ 2>/dev/null 1>/dev/null` -TEST `find $M0/ | xargs stat 2>/dev/null 1>/dev/null` - -TEST stat $B0/${V0}-0/Link1 -TEST stat $B0/${V0}-0/Link2 - -cleanup; diff --git a/tests/bugs/bug-1047378.t b/tests/bugs/bug-1047378.t deleted file mode 100644 index b441ee9b149..00000000000 --- a/tests/bugs/bug-1047378.t +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST "echo volume list | $CLI --xml | xmllint --format -" - -cleanup diff --git a/tests/bugs/bug-1047416.t b/tests/bugs/bug-1047416.t deleted file mode 100644 index 67a0fea5259..00000000000 --- a/tests/bugs/bug-1047416.t +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -function write_to_file { - dd of=$M0/1 if=/dev/zero bs=1024k count=128 oflag=append 2>&1 >/dev/null -} - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 -TEST $CLI volume start $V0 -TEST $CLI volume profile $V0 start -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 - -# Verify 'volume profile info' prints both cumulative and incremental stats -write_to_file & -wait -output=$($CLI volume profile $V0 info) -EXPECT 2 cumulative_stat_count "$output" -EXPECT 2 incremental_stat_count "$output" ' 0 ' - -# Verify 'volume profile info peek' prints both cumulative and incremental stats -# without clearing incremental stats -write_to_file & -wait -output=$($CLI volume profile $V0 info peek) -EXPECT 2 cumulative_stat_count "$output" -EXPECT 2 incremental_stat_count "$output" ' 1 ' - -write_to_file & -wait -output=$($CLI volume profile $V0 info peek) -EXPECT 2 cumulative_stat_count "$output" -EXPECT 2 incremental_stat_count "$output" ' 1 ' - -# Verify 'volume profile info incremental peek' prints incremental stats only -# without clearing incremental stats -write_to_file & -wait -output=$($CLI volume profile $V0 info incremental peek) -EXPECT 0 cumulative_stat_count "$output" -EXPECT 2 incremental_stat_count "$output" ' 1 ' - -write_to_file & -wait -output=$($CLI volume profile $V0 info incremental peek) -EXPECT 0 cumulative_stat_count "$output" -EXPECT 2 incremental_stat_count "$output" ' 1 ' - -# Verify 'volume profile info clear' clears both incremental and cumulative stats -write_to_file & -wait -output=$($CLI volume profile $V0 info clear) -EXPECT 2 cleared_stat_count "$output" - -output=$($CLI volume profile $V0 info) -EXPECT 2 cumulative_stat_count 
"$output" -EXPECT 2 incremental_stat_count "$output" ' 0 ' -EXPECT 4 data_read_count "$output" ' 0 ' -EXPECT 4 data_written_count "$output" ' 0 ' - -cleanup; diff --git a/tests/bugs/bug-1047955.t b/tests/bugs/bug-1047955.t deleted file mode 100644 index 169333e9eb4..00000000000 --- a/tests/bugs/bug-1047955.t +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../cluster.rc - -function check_peers { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} - -cleanup; - -# Create a 2x2 dist-rep volume; peer probe a new node. -# Performing remove-brick from this new node must succeed -# without crashing it's glusterd - -TEST launch_cluster 2; -TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4} -TEST $CLI_1 volume start $V0; -TEST $CLI_1 peer probe $H2; -EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; -TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start; -TEST $CLI_2 volume info -cleanup; diff --git a/tests/bugs/bug-1049323.t b/tests/bugs/bug-1049323.t deleted file mode 100755 index 203612e91f1..00000000000 --- a/tests/bugs/bug-1049323.t +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -function _init() -{ -# Start glusterd -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -#Create a volume -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; - -#Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -#Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; -TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 - -#Enable Quota -TEST $CLI volume quota $V0 enable - -##Wait for the auxiliary mount to comeup -sleep 3; -} - -function get_aux() -{ -##Check if a auxiliary mount is there -df -h | grep "/var/run/gluster/$V0" - - -if [ $? -eq 0 ] -then - echo "0" -else - echo "1" -fi -} - -function create_data() -{ -#set some limit on the volume -TEST $CLI volume quota $V0 limit-usage / 50MB; - -#Auxiliary mount should be there before stopping the volume -EXPECT "0" get_aux; - -TEST $CLI volume stop $V0; - -#Aux mount should have been removed -EXPECT "1" get_aux; - -} - - -_init; -create_data; -cleanup; diff --git a/tests/bugs/bug-1049834.t b/tests/bugs/bug-1049834.t deleted file mode 100755 index 7e16fde61aa..00000000000 --- a/tests/bugs/bug-1049834.t +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../cluster.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../snapshot.rc - -cleanup; -TEST verify_lvm_version -TEST launch_cluster 2 -TEST setup_lvm 2 - -TEST $CLI_1 peer probe $H2 -EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count - -TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2 -EXPECT 'Created' volinfo_field $V0 'Status' - -TEST $CLI_1 volume start $V0 -EXPECT 'Started' volinfo_field $V0 'Status' - -#Setting the snap-max-hard-limit to 4 -TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 4 -PID_1=$! -wait $PID_1 - -#Creating 3 snapshots on the volume (which is the soft-limit) -TEST create_n_snapshots $V0 3 $V0_snap -TEST snapshot_n_exists $V0 3 $V0_snap - -#Creating the 4th snapshot on the volume and expecting it to be created -# but with the deletion of the oldest snapshot i.e 1st snapshot -TEST $CLI_1 snapshot create ${V0}_snap4 ${V0} -TEST snapshot_exists 1 ${V0}_snap4 -TEST ! 
snapshot_exists 1 ${V0}_snap1 -TEST $CLI_1 snapshot delete ${V0}_snap4 -TEST $CLI_1 snapshot create ${V0}_snap1 ${V0} -TEST snapshot_exists 1 ${V0}_snap1 - -#Deleting the 4 snaps -#TEST delete_n_snapshots $V0 4 $V0_snap -#TEST ! snapshot_n_exists $V0 4 $V0_snap - -cleanup; diff --git a/tests/bugs/bug-1051896.c b/tests/bugs/bug-1051896.c deleted file mode 100644 index 0ffd8198642..00000000000 --- a/tests/bugs/bug-1051896.c +++ /dev/null @@ -1,94 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -int do_setfacl(const char *path, const char *options, const char *textacl) -{ - int r; - int type; - acl_t acl; - int dob; - int dok; - int dom; - struct stat st; - char textmode[30]; - - r = 0; - dob = strchr(options,'b') != (char*)NULL; - dok = strchr(options,'k') != (char*)NULL; - dom = strchr(options,'m') != (char*)NULL; - if ((dom && !textacl) - || (!dom && (textacl || (!dok && !dob) || - strchr(options,'d')))) { - errno = EBADRQC; /* "bad request" */ - r = -1; - } else { - if (dob || dok) { - r = acl_delete_def_file(path); - } - if (dob && !r) { - if (!stat(path,&st)) { - sprintf(textmode, - "u::%c%c%c,g::%c%c%c,o::%c%c%c", - (st.st_mode & 0400 ? 'r' : '-'), - (st.st_mode & 0200 ? 'w' : '-'), - (st.st_mode & 0100 ? 'x' : '-'), - (st.st_mode & 0040 ? 'r' : '-'), - (st.st_mode & 0020 ? 'w' : '-'), - (st.st_mode & 0010 ? 'x' : '-'), - (st.st_mode & 004 ? 'r' : '-'), - (st.st_mode & 002 ? 'w' : '-'), - (st.st_mode & 001 ? 'x' : '-')); - acl = acl_from_text(textmode); - if (acl) { - r = acl_set_file(path, - ACL_TYPE_ACCESS,acl); - acl_free(acl); - } else - r = -1; - } else - r = -1; - } - if (!r && dom) { - if (strchr(options,'d')) - type = ACL_TYPE_DEFAULT; - else - type = ACL_TYPE_ACCESS; - acl = acl_from_text(textacl); - if (acl) { - r = acl_set_file(path,type,acl); - acl_free(acl); - } else - r = -1; - } - } - if (r) - r = -errno; - return (r); -} - - -int main(int argc, char *argv[]){ - int rc = 0; - if (argc != 4) { - fprintf(stderr, - "usage: ./setfacl_test \n"); - return 0; - } - if ((rc = do_setfacl(argv[1], argv[2], argv[3])) != 0){ - fprintf(stderr, "do_setfacl failed: %s\n", strerror(errno)); - return rc; - } - return 0; -} diff --git a/tests/bugs/bug-1051896.t b/tests/bugs/bug-1051896.t deleted file mode 100644 index e069b177982..00000000000 --- a/tests/bugs/bug-1051896.t +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -case $OSTYPE in -NetBSD) - echo "Skip test on ACL which are not available on NetBSD" >&2 - SKIP_TESTS - exit 0 - ;; -*) - ;; -esac - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; -TEST $CLI volume start $V0; - -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --acl -s $H0 --volfile-id $V0 $M0; - -TEST touch $M0/file1; - -TEST $CC $(dirname $0)/bug-1051896.c -o $(dirname $0)/bug-1051896 -lacl -TEST ! $(dirname $0)/bug-1051896 $M0/file1 m 'u::r,u::w,g::r--,o::r--' -TEST rm -f $(dirname $0)/bug-1051896 - -cleanup diff --git a/tests/bugs/bug-1053579.t b/tests/bugs/bug-1053579.t deleted file mode 100755 index 11357f71eee..00000000000 --- a/tests/bugs/bug-1053579.t +++ /dev/null @@ -1,111 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../nfs.rc - -cleanup - -# prepare the users and groups -NEW_USER=bug1053579 -NEW_UID=1053579 -NEW_GID=1053579 -LAST_GID=1053779 -NEW_GIDS=${NEW_GID} - -# OS-specific overrides -case $OSTYPE in -NetBSD|Darwin) - # only NGROUPS_MAX=16 secondary groups are supported - LAST_GID=1053593 - ;; -FreeBSD) - # NGROUPS_MAX=1023 (FreeBSD>=8.0), we can afford 200 groups - ;; -Linux) - # NGROUPS_MAX=65536, we can afford 200 groups - ;; -*) - ;; -esac - -# create a user that belongs to many groups -for GID in $(seq -f '%6.0f' ${NEW_GID} ${LAST_GID}) -do - groupadd -o -g ${GID} ${NEW_USER}-${GID} - NEW_GIDS="${NEW_GIDS},${NEW_USER}-${GID}" -done -TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -G ${NEW_USER}-${NEW_GIDS} ${NEW_USER} - -# preparation done, start the tests - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0}1 -TEST $CLI volume set $V0 nfs.server-aux-gids on -TEST $CLI volume start $V0 - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available - -# mount the volume -TEST mount_nfs $H0:/$V0 $N0 nolock -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 - -# the actual test, this used to crash -su -m ${NEW_USER} -c "stat $N0/. > /dev/null" -TEST [ $? -eq 0 ] - -# create a file that only a user in a high-group can access -echo 'Hello World!' > $N0/README -chgrp ${LAST_GID} $N0/README -chmod 0640 $N0/README - -#su -m ${NEW_USER} -c "cat $N0/README 2>&1 > /dev/null" -su -m ${NEW_USER} -c "cat $N0/README" -ret=$? - -case $OSTYPE in -Linux) # Linux NFS fails with big GID - if [ $ret -ne 0 ] ; then - res="Y" - else - res="N" - fi - ;; -*) # Other systems should cope better - if [ $ret -eq 0 ] ; then - res="Y" - else - res="N" - fi - ;; -esac -TEST [ "x$res" = "xY" ] - -# This passes only on build.gluster.org, not reproducible on other machines?! -#su -m ${NEW_USER} -c "cat $M0/README 2>&1 > /dev/null" -#TEST [ $? -ne 0 ] - -# enable server.manage-gids and things should work -TEST $CLI volume set $V0 server.manage-gids on - -su -m ${NEW_USER} -c "cat $N0/README 2>&1 > /dev/null" -TEST [ $? -eq 0 ] -su -m ${NEW_USER} -c "cat $M0/README 2>&1 > /dev/null" -TEST [ $? -eq 0 ] - -# cleanup -userdel --force ${NEW_USER} -for GID in $(seq -f '%6.0f' ${NEW_GID} ${LAST_GID}) -do - groupdel ${NEW_USER}-${GID} -done - -rm -f $N0/README -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 - -cleanup diff --git a/tests/bugs/bug-1058663.c b/tests/bugs/bug-1058663.c deleted file mode 100644 index 8f26d0f07c8..00000000000 --- a/tests/bugs/bug-1058663.c +++ /dev/null @@ -1,119 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#define FILE_SIZE 1048576 - -/* number of tests to run */ -#define RUN_LOOP 1000 - -/* number of SIGBUS before exiting */ -#define MAX_SIGBUS 1 -static int expect_sigbus = 0; -static int sigbus_received = 0; - -/* test for truncate()/seek()/write()/mmap() - * There should ne no SIGBUS triggered. 
- */ -void seek_write(char *filename) -{ - int fd; - uint8_t* map; - int i; - - fd = open(filename, O_RDWR|O_CREAT|O_TRUNC, 0600); - lseek(fd, FILE_SIZE - 1, SEEK_SET); - write(fd, "\xff", 1); - - map = mmap(NULL, FILE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0); - for (i = 0; i < (FILE_SIZE - 1); i++) { - if (map[i] != 0) /* should never be true */ - abort(); - } - munmap(map, FILE_SIZE); - - close(fd); -} - -int read_after_eof(char *filename) -{ - int ret = 0; - int fd; - char* data; - uint8_t* map; - - fd = open(filename, O_RDWR|O_CREAT|O_TRUNC, 0600); - lseek(fd, FILE_SIZE - 1, SEEK_SET); - write(fd, "\xff", 1); - - /* trigger verify that reading after EOF fails */ - ret = read(fd, data, FILE_SIZE / 2); - if (ret != 0) - return 1; - - /* map an area of 1 byte after FILE_SIZE */ - map = mmap(NULL, 1, PROT_READ, MAP_PRIVATE, fd, FILE_SIZE); - /* map[0] is an access after EOF, it should trigger SIGBUS */ - if (map[0] != 0) - /* it is expected that we exit before we get here */ - if (!sigbus_received) - return 1; - munmap(map, FILE_SIZE); - - close(fd); - - return ret; -} - -/* signal handler for SIGBUS */ -void catch_sigbus(int signum) -{ - switch (signum) { -#ifdef __NetBSD__ - /* Depending on architecture, we can get SIGSEGV */ - case SIGSEGV: /* FALLTHROUGH */ -#endif - case SIGBUS: - sigbus_received++; - if (!expect_sigbus) - exit(EXIT_FAILURE); - if (sigbus_received >= MAX_SIGBUS) - exit(EXIT_SUCCESS); - break; - default: - printf("Unexpected signal received: %d\n", signum); - } -} - -int main(int argc, char** argv) -{ - int i = 0; - - if (argc == 1) { - printf("Usage: %s \n", argv[0]); - return EXIT_FAILURE; - } - -#ifdef __NetBSD__ - /* Depending on architecture, we can get SIGSEGV */ - signal(SIGSEGV, catch_sigbus); -#endif - signal(SIGBUS, catch_sigbus); - - /* the next test should not trigger SIGBUS */ - expect_sigbus = 0; - for (i = 0; i < RUN_LOOP; i++) { - seek_write(argv[1]); - } - - /* the next test should trigger SIGBUS */ - expect_sigbus = 1; - if (read_after_eof(argv[1])) - return EXIT_FAILURE; - - return EXIT_SUCCESS; -} diff --git a/tests/bugs/bug-1058663.t b/tests/bugs/bug-1058663.t deleted file mode 100644 index c1e8211dd08..00000000000 --- a/tests/bugs/bug-1058663.t +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/$V0; -TEST $CLI volume start $V0; - -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0 - -# compile the test program and run it -TEST $CC $(dirname $0)/bug-1058663.c -o $(dirname $0)/bug-1058663; -TEST $(dirname $0)/bug-1058663 $M0/bug-1058663.bin; -TEST rm -f $(dirname $0)/M0/bug-1058663.bin; - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-1058797.t b/tests/bugs/bug-1058797.t deleted file mode 100644 index 74de859441a..00000000000 --- a/tests/bugs/bug-1058797.t +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -#Test that the setuid bit is healed correctly. - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; -#Basic checks -TEST glusterd - -#Create a 1x2 replica volume -TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}; -TEST $CLI volume start $V0 -TEST $CLI volume set $V0 cluster.self-heal-daemon off - -# FUSE mount;create a file -TEST glusterfs -s $H0 --volfile-id $V0 $M0 -TEST touch $M0/file - -#Kill brick1 and set S_ISUID and S_ISGID bits from mount point -kill_brick $V0 $H0 $B0/brick1 -TEST chmod +x,+s $M0/file - -#Get file permissions from backend brick0 and verify that S_ISUID is indeed set -file_permissions1=`ls -l $B0/brick0/file | awk '{print $1}'| cut -d. -f1 | cut -d- -f2,3,4,5,6` -setuid_bit1=`echo $file_permissions1 | cut -b3` -EXPECT "s" echo $setuid_bit1 - -#Restart volume and do lookup from mount to trigger heal -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 -TEST dd if=$M0/file of=/dev/null - -#Get file permissions from healed brick1 and verify that S_ISUID is indeed set -file_permissions2=`ls -l $B0/brick1/file | awk '{print $1}' | cut -d. -f1 | cut -d- -f2,3,4,5,6` -setuid_bit2=`echo $file_permissions2 | cut -b3` -EXPECT "s" echo $setuid_bit2 - -#Also compare the entire permission string,just to be sure -EXPECT $file_permissions1 echo $file_permissions2 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-1063230.t b/tests/bugs/bug-1063230.t deleted file mode 100755 index a04be88e94e..00000000000 --- a/tests/bugs/bug-1063230.t +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/brick0 $H0:$B0/brick1 -TEST $CLI volume start $V0 - -sleep 5 - -TEST glusterfs -s $H0 --volfile-id $V0 $M0 - -var=`gluster volume rebalance $V0 start force` - -EXPECT "0" echo $? - -var1="volume rebalance: $V0: success: Rebalance on $V0 has \ -been started successfully. Use rebalance status command to \ -check status of the rebalance process." - -echo $var | grep "$var1" - -EXPECT "0" echo $? - -cleanup diff --git a/tests/bugs/bug-1064768.t b/tests/bugs/bug-1064768.t deleted file mode 100644 index b0d04eb25ec..00000000000 --- a/tests/bugs/bug-1064768.t +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1 -TEST $CLI volume start $V0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; - -TEST $CLI volume profile $V0 start -TEST $CLI volume profile $V0 info -TEST $CLI volume profile $V0 stop - -TEST $CLI volume status -TEST $CLI volume stop $V0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Stopped' volinfo_field $V0 'Status'; -cleanup; diff --git a/tests/bugs/bug-1066798.t b/tests/bugs/bug-1066798.t deleted file mode 100755 index 445ec75c936..00000000000 --- a/tests/bugs/bug-1066798.t +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; - -TESTS_EXPECTED_IN_LOOP=200 - -## Start glusterd -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -## Lets create volume -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; -TEST glusterfs -s $H0 --volfile-id=$V0 $M0 - -############################################################ -#TEST_PLAN# -#Create a file -#Store the hashed brick information -#Create hard links to it -#Remove the hashed brick -#Check now all the hardlinks are migrated in to "OTHERBRICK" -#Check also in mount point for all the files -#check there is no failures and skips for migration -############################################################ - -TEST touch $M0/file1; - -file_perm=`ls -l $M0/file1 | grep file1 | awk '{print $1}'`; - -if [ -f $B0/${V0}1/file1 ] -then - HASHED=$B0/${V0}1 - OTHER=$B0/${V0}2 -else - HASHED=$B0/${V0}2 - OTHER=$B0/${V0}1 -fi - -#create hundred hard links -for i in {1..50}; -do -TEST_IN_LOOP ln $M0/file1 $M0/link$i; -done - - -TEST $CLI volume remove-brick $V0 $H0:${HASHED} start -EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:${HASHED}"; - -#check consistency in mount point -#And also check all the links are migrated to OTHER -for i in {1..50} -do -TEST_IN_LOOP [ -f ${OTHER}/link${i} ]; -TEST_IN_LOOP [ -f ${M0}/link${i} ]; -done; - -#check in OTHER that all the files has proper permission (Means no -#linkto files) - -for i in {1..50} -do -link_perm=`ls -l $OTHER | grep -w link${i} | awk '{print $1}'`; -TEST_IN_LOOP [ "${file_perm}" == "${link_perm}" ] - -done - -#check that remove-brick status should not have any failed or skipped files - -var=`$CLI volume remove-brick $V0 $H0:${HASHED} status | grep completed` - -TEST [ `echo $var | awk '{print $5}'` = "0" ] -TEST [ `echo $var | awk '{print $6}'` = "0" ] - -cleanup diff --git a/tests/bugs/bug-1070734.t b/tests/bugs/bug-1070734.t deleted file mode 100755 index 3928983c345..00000000000 --- a/tests/bugs/bug-1070734.t +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../nfs.rc - -cleanup; - -## Start glusterd -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -## Lets create volume -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0; - -############################################################################ -#TEST-PLAN: -#Create a directory DIR and a file inside DIR -#check the hash brick of the file -#delete the directory for recreating later after remove-brick -#remove the brick where the files hashed to -#After remove-brick status says complete go on creating the same directory \ -#DIR and file -#Check if the file now falls into the other brick -#Check if the other brick gets the full layout and the remove brick gets \ -#the zeroed layout -############################################################################ - -TEST mkdir $N0/DIR; - -TEST touch $N0/DIR/file; - -if [ -f $B0/${V0}1/DIR/file ] -then - HASHED=$B0/${V0}1; - OTHERBRICK=$B0/${V0}2; -else - HASHED=$B0/${V0}2; - OTHERBRICK=$B0/${V0}1; -fi - -TEST rm -f $N0/DIR/file; -TEST rmdir $N0/DIR; -TEST $CLI volume remove-brick $V0 $H0:${HASHED} start; -EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" \ -"$H0:${HASHED}"; - -TEST mkdir $N0/DIR; -TEST touch $N0/DIR/file; - -#Check now the file should fall in to OTHERBRICK -TEST [ -f ${OTHERBRICK}/DIR/file ] - -#Check the DIR on HASHED should have got zeroed layout and the \ -#OTHERBRICK should have got full layout -EXPECT "0x00000001000000000000000000000000" dht_get_layout $HASHED/DIR ; -EXPECT "0x000000010000000000000000ffffffff" dht_get_layout $OTHERBRICK/DIR; - -## Before killing daemon to avoid deadlocks -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -cleanup diff --git a/tests/bugs/bug-1075087.t b/tests/bugs/bug-1075087.t deleted file mode 100644 index 89aab028de6..00000000000 --- a/tests/bugs/bug-1075087.t +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \ - $H0:$B0/${V0}2 $H0:$B0/${V0}3 -TEST $CLI volume start $V0 - -## Mount FUSE -TEST glusterfs -s $H0 --volfile-id=$V0 $M0; - -TEST mkdir $M0/dir{1..10}; -TEST touch $M0/dir{1..10}/files{1..10}; - -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:/$B0/${V0}5 - -TEST $CLI volume rebalance $V0 start force -EXPECT_WITHIN 60 "completed" rebalance_status_field $V0 - -TEST pkill gluster -TEST glusterd -TEST pidof glusterd - -# status should be "completed" immediate after glusterd has respawned. -EXPECT_WITHIN 5 "completed" rebalance_status_field $V0 - -cleanup; diff --git a/tests/bugs/bug-1077682.t b/tests/bugs/bug-1077682.t deleted file mode 100644 index b47744e603c..00000000000 --- a/tests/bugs/bug-1077682.t +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -function get-task-status() -{ - $CLI $COMMAND | grep -o $PATTERN - if [ ${PIPESTATUS[0]} -ne 0 ]; - then - return 1 - fi - return 0 -} - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3,4} -TEST $CLI volume start $V0 -TEST ! 
$CLI volume remove-brick $V0 $H0:$B0/${V0}1 -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 start - -EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" \ -"$H0:$B0/${V0}3" - -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 commit -TEST killall glusterd -TEST glusterd - -cleanup diff --git a/tests/bugs/bug-1085330.t b/tests/bugs/bug-1085330.t deleted file mode 100755 index dafba215540..00000000000 --- a/tests/bugs/bug-1085330.t +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -STR="1234567890" -volname="Vol" - -cleanup; -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - - -# Construct volname string such that its more than 256 characters -for i in {1..30} -do - volname+=$STR -done -# Now $volname is more than 256 chars - -TEST ! $CLI volume create $volname $H0:$B0/${volname}{1,2}; - -TEST $CLI volume info; - -# Construct brick string such that its more than 256 characters -volname="Vol1234" -brick="brick" -for i in {1..30} -do - brick+=$STR -done -# Now $brick1 is more than 256 chars - -TEST ! $CLI volume create $volname $H0:$B0/$brick; - -TEST $CLI volume info; - -# Now try to create a volume with couple of bricks (strlen(volname) = 128 & -# strlen(brick1) = 128 -# Command should still fail as strlen(volp path) > 256 - -volname="Volume-0" -brick="brick-00" -STR="12345678" - -for i in {1..15} -do - volname+=$STR - brick+=$STR -done -TEST ! $CLI volume create $volname $H0:$B0/$brick; - -TEST $CLI volume info; - -# test case with brick path as 255 and a trailing "/" -brick="" -STR1="12345678" -volname="vol" - -for i in {1..31} -do - brick+=$STR1 -done -brick+="123456/" - -echo $brick | wc -c -# Now $brick is exactly 255 chars, but at end a trailing space -# This will still fail as volfpath exceeds more than _POSIX_MAX chars - -TEST ! $CLI volume create $volname $H0:$B0/$brick; - -TEST $CLI volume info; - -# Positive test case -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; - -TEST $CLI volume info; - -cleanup; diff --git a/tests/bugs/bug-1086228.t b/tests/bugs/bug-1086228.t deleted file mode 100755 index 4c77a851b88..00000000000 --- a/tests/bugs/bug-1086228.t +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../fileio.rc -. $(dirname $0)/../dht.rc -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2} -TEST $CLI volume start $V0; -TEST glusterfs --direct-io-mode=yes --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; - -echo "D" > $M0/file1; -TEST chmod +st $M0/file1; - -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}"3" -TEST $CLI volume rebalance $V0 start force - -EXPECT_WITHIN "10" "0" rebalance_completed -count=0 -for i in `ls $B0/$V0"3"`; - do - var=`stat -c %A $B0/$V0"3"/$i | cut -c 4`; - echo $B0/$V0"3"/$i $var - if [ "$var" != "S" ]; then - count=$((count + 1)) - fi - done - -TEST [[ $count == 0 ]] -cleanup diff --git a/tests/bugs/bug-1087198.t b/tests/bugs/bug-1087198.t deleted file mode 100644 index 92d2acc0859..00000000000 --- a/tests/bugs/bug-1087198.t +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash - -## The script tests the logging of the quota in the bricks after reaching soft -## limit of the configured limit. -## -## Steps: -## 1. Create and mount the volume -## 2. Enable quota and set the limit on 2 directories -## 3. 
Write some data to cross the limit -## 4. Grep the string expected in brick logs -## 5. Wait for 10 seconds (alert timeout is set to 10s) -## 6. Repeat 3 and 4. -## 7. Cleanup - -. $(dirname $0)/../include.rc -. $(dirname $0)/../fileio.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../nfs.rc - -cleanup; - -#1 -## Step 1 -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/brick{1..4}; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 noac,nolock - - -QUOTA_LIMIT_DIR="quota_limit_dir" -BRICK_LOG_DIR="`gluster --print-logdir`/bricks" - -#9 -TEST mkdir $N0/$QUOTA_LIMIT_DIR - -#10 -## Step 2 -TEST $CLI volume quota $V0 enable -TEST $CLI volume quota $V0 alert-time 10 -TEST $CLI volume quota $V0 hard-timeout 0 -TEST $CLI volume quota $V0 soft-timeout 0 -TEST $CLI volume quota $V0 limit-usage / 200KB -TEST $CLI volume quota $V0 limit-usage /$QUOTA_LIMIT_DIR 100KB - -#16 -## Step 3 and 4 -TEST dd if=/dev/urandom of=$N0/$QUOTA_LIMIT_DIR/95KB_file bs=1k count=95 -TEST grep -e "\"Usage crossed soft limit:.*used by /$QUOTA_LIMIT_DIR\"" -- $BRICK_LOG_DIR/* - -TEST dd if=/dev/urandom of=$N0/100KB_file bs=1k count=100 -TEST grep -e "\"Usage crossed soft limit:.*used by /\"" -- $BRICK_LOG_DIR/* - -#20 -## Step 5 -TEST sleep 10 - -## Step 6 -TEST dd if=/dev/urandom of=$N0/$QUOTA_LIMIT_DIR/1KB_file bs=1k count=1 -TEST grep -e "\"Usage is above soft limit:.*used by /$QUOTA_LIMIT_DIR\"" -- $BRICK_LOG_DIR/* - -#23 -TEST dd if=/dev/urandom of=$N0/1KB_file bs=1k count=1 -TEST grep -e "\"Usage is above soft limit:.*used by /\"" -- $BRICK_LOG_DIR/* - -#25 -## Step 7 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -cleanup; diff --git a/tests/bugs/bug-1087203.t b/tests/bugs/bug-1087203.t deleted file mode 100644 index e41d601fb66..00000000000 --- a/tests/bugs/bug-1087203.t +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../snapshot.rc -. $(dirname $0)/../cluster.rc - -function get_volume_info () -{ - local var=$1 - $CLI_1 volume info $V0 | grep "^$var" | sed 's/.*: //' -} - -cleanup; - -TEST verify_lvm_version -TEST launch_cluster 2 -TEST setup_lvm 2 - -TEST $CLI_1 peer probe $H2; -EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count; - -TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2 -EXPECT "$V0" get_volume_info 'Volume Name'; -EXPECT 'Created' get_volume_info 'Status'; - -TEST $CLI_1 volume start $V0 -EXPECT 'Started' get_volume_info 'Status'; - - -# Setting system limit -TEST $CLI_1 snapshot config snap-max-hard-limit 100 - -# Volume limit cannot exceed system limit, as limit is set to 100, -# this should fail. -TEST ! $CLI_1 snapshot config $V0 snap-max-hard-limit 101 - -# Following are the invalid cases -TEST ! $CLI_1 snapshot config $V0 snap-max-hard-limit a10 -TEST ! $CLI_1 snapshot config snap-max-hard-limit 10a -TEST ! $CLI_1 snapshot config snap-max-hard-limit 10% -TEST ! $CLI_1 snapshot config snap-max-soft-limit 50%1 -TEST ! $CLI_1 snapshot config snap-max-soft-limit 0111 -TEST ! $CLI_1 snapshot config snap-max-hard-limit OXA -TEST ! $CLI_1 snapshot config snap-max-hard-limit 11.11 -TEST ! $CLI_1 snapshot config snap-max-soft-limit 50% -TEST ! $CLI_1 snapshot config snap-max-hard-limit -100 -TEST ! $CLI_1 snapshot config snap-max-soft-limit -90 - -# Soft limit cannot be assigned to volume -TEST ! 
$CLI_1 snapshot config $V0 snap-max-soft-limit 10 - -# Valid case -TEST $CLI_1 snapshot config snap-max-soft-limit 50 -TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 10 - -# Validating auto-delete feature -# Make sure auto-delete is disabled by default -EXPECT 'disable' snap_config CLI_1 'auto-delete' - -# Test for invalid value for auto-delete -TEST ! $CLI_1 snapshot config auto-delete test - -TEST $CLI_1 snapshot config snap-max-hard-limit 6 -TEST $CLI_1 snapshot config snap-max-soft-limit 50 - -# Create 4 snapshots -snap_index=1 -snap_count=4 -TEST snap_create CLI_1 $V0 $snap_index $snap_count - -# If auto-delete is disabled then oldest snapshot -# should not be deleted automatically. -EXPECT '4' get_snap_count CLI_1; - -TEST snap_delete CLI_1 $snap_index $snap_count; - -# After all those 4 snaps are deleted, There will not be any snaps present -EXPECT '0' get_snap_count CLI_1; - -TEST $CLI_1 snapshot config auto-delete enable - -# auto-delete is already enabled, Hence expect a failure. -TEST ! $CLI_1 snapshot config auto-delete on - -# Testing other boolean values with auto-delete -TEST $CLI_1 snapshot config auto-delete off -EXPECT 'off' snap_config CLI_1 'auto-delete' - -TEST $CLI_1 snapshot config auto-delete true -EXPECT 'true' snap_config CLI_1 'auto-delete' - -# Try to create 4 snaps again, As auto-delete is enabled -# oldest snap should be deleted and snapcount should be 3 - -TEST snap_create CLI_1 $V0 $snap_index $snap_count; -EXPECT '3' get_snap_count CLI_1; - -TEST $CLI_1 snapshot config auto-delete disable -EXPECT 'disable' snap_config CLI_1 'auto-delete' - -cleanup; - diff --git a/tests/bugs/bug-1087487.t b/tests/bugs/bug-1087487.t deleted file mode 100755 index d0cb0a32b4a..00000000000 --- a/tests/bugs/bug-1087487.t +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -function rebalance_start { - $CLI volume rebalance $1 start | head -1; -} - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 - -TEST $CLI volume start $V0 - -EXPECT "volume rebalance: $V0: success: Rebalance on $V0 has \ -been started successfully. Use rebalance status command to \ -check status of the rebalance process." rebalance_start $V0 - -cleanup; diff --git a/tests/bugs/bug-1088231.t b/tests/bugs/bug-1088231.t deleted file mode 100755 index da6adaeb173..00000000000 --- a/tests/bugs/bug-1088231.t +++ /dev/null @@ -1,161 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../fileio.rc -. $(dirname $0)/../dht.rc - - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1} -TEST $CLI volume set $V0 cluster.randomize-hash-range-by-gfid on -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --aux-gfid-mount --volfile-server=$H0 $M0 -TEST mkdir $M0/a - - -## Bug Description: In case of dht_discover code path, which is triggered -## when lookup done is nameless lookup, at the end of the lookup, even if -## it finds that self-heal is needed to fix-the layout it wont heal because -## healing code path is not added under nameless lookup. - -## What to test: With Patch, Even in case of nameless lookup, if layout -## needs to be fixed, the it will be fixed wherever lookup is successfull -## and it will not create any directory for subvols having ENOENT as it is -## nameless lookup. 
- -gfid_with_hyphen=`getfattr -n glusterfs.gfid.string $M0/a 2>/dev/null \ - | grep glusterfs.gfid.string | cut -d '"' -f 2` - -TEST setfattr -x trusted.glusterfs.dht $B0/$V0"0"/a - -TEST stat $M0/.gfid/$gfid_with_hyphen - -## Assuming that we have two bricks, we can have two permutations of layout -## Case 1: Brick - A Brick - B -## 0 - 50 51-100 -## -## Case 2: Brick - A Brick - B -## 51 - 100 0 - 50 -## -## To ensure layout is assigned properly, the following tests should be -## performed. -## -## Case 1: Layout_b0_s = 0; Layout_b0_e = 50, Layout_b1_s=51, -## Layout_b1_e = 100; -## -## layout_b1_s = layout_b0_e + 1; -## layout_b0_s = layout_b1_e + 1; but b0_s is 0, so change to 101 -## then compare -## Case 2: Layout_b0_s = 51, Layout_b0_e = 100, Layout_b1_s=0, -## Layout_b1_e = 51 -## -## layout_b0_s = Layout_b1_e + 1; -## layout_b1_s = Layout_b0_e + 1; but b1_s is 0, so chage to 101. - - -##Extract Layout -layout_b0_s=`get_layout $B0/$V0"0"/a | cut -c19-26` -layout_b0_e=`get_layout $B0/$V0"0"/a | cut -c27-34` -layout_b1_s=`get_layout $B0/$V0"1"/a | cut -c19-26` -layout_b1_e=`get_layout $B0/$V0"1"/a | cut -c27-34` - - -##Add 0X to perform Hex arithematic -layout_b0_s="0x"$layout_b0_s -layout_b0_e="0x"$layout_b0_e -layout_b1_s="0x"$layout_b1_s -layout_b1_e="0x"$layout_b1_e - - - -## Logic of converting starting layout "0" to "Max_value of layout + 1" -comp1=$(($layout_b0_s + 0)) -if [ "$comp1" == "0" ];then - comp1=4294967296 -fi - -comp2=$(($layout_b1_s + 0)) -if [ "$comp2" == "0" ];then - comp2=4294967296 -fi - -diff1=$(($layout_b0_e + 1)) -diff2=$(($layout_b1_e + 1)) - - -healed=0 - -if [ "$comp1" == "$diff1" ] && [ "$comp2" == "$diff2" ]; then - healed=$(($healed + 1)) -fi - -if [ "$comp1" == "$diff2" ] && [ "$comp2" == "$diff1" ]; then - healed=$(($healed + 1)) -fi - -TEST [ $healed == 1 ] - -cleanup - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1} -TEST $CLI volume set $V0 cluster.randomize-hash-range-by-gfid on -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --aux-gfid-mount --volfile-server=$H0 $M0 -TEST mkdir $M0/a - -gfid_with_hyphen=`getfattr -n glusterfs.gfid.string $M0/a 2>/dev/null \ - | grep glusterfs.gfid.string | cut -d '"' -f 2` - -TEST setfattr -x trusted.glusterfs.dht $B0/$V0"0"/a -TEST setfattr -x trusted.glusterfs.dht $B0/$V0"1"/a - -TEST stat $M0/.gfid/$gfid_with_hyphen - -##Extract Layout -layout_b0_s=`get_layout $B0/$V0"0"/a | cut -c19-26` -layout_b0_e=`get_layout $B0/$V0"0"/a | cut -c27-34` -layout_b1_s=`get_layout $B0/$V0"1"/a | cut -c19-26` -layout_b1_e=`get_layout $B0/$V0"1"/a | cut -c27-34` - - -##Add 0X to perform Hex arithematic -layout_b0_s="0x"$layout_b0_s -layout_b0_e="0x"$layout_b0_e -layout_b1_s="0x"$layout_b1_s -layout_b1_e="0x"$layout_b1_e - - - -## Logic of converting starting layout "0" to "Max_value of layout + 1" -comp1=$(($layout_b0_s + 0)) -if [ "$comp1" == "0" ];then - comp1=4294967296 -fi - -comp2=$(($layout_b1_s + 0)) -if [ "$comp2" == "0" ];then - comp2=4294967296 -fi - -diff1=$(($layout_b0_e + 1)) -diff2=$(($layout_b1_e + 1)) - - -healed=0 - -if [ "$comp1" == "$diff1" ] && [ "$comp2" == "$diff2" ]; then - healed=$(($healed + 1)) -fi - -if [ "$comp1" == "$diff2" ] && [ "$comp2" == "$diff1" ]; then - healed=$(($healed + 1)) -fi - -TEST [ $healed == 1 ] -cleanup - diff --git a/tests/bugs/bug-1089668.t b/tests/bugs/bug-1089668.t deleted file mode 100755 index 3baa3f27fbe..00000000000 --- a/tests/bugs/bug-1089668.t +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -. 
$(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../dht.rc - -cleanup - -#This script checks command "gluster volume rebalance status will not -#show any output when user have done only remove-brick start and command -#'gluster volume remove-brick status' will not show -#any output when user have triggered only rebalance start. - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2} -TEST $CLI volume start $V0 - -TEST $CLI volume rebalance $V0 start -TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 status - -TEST $CLI volume rebalance $V0 stop - -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start -TEST ! $CLI volume rebalance $V0 status - -cleanup diff --git a/tests/bugs/bug-1090042.t b/tests/bugs/bug-1090042.t deleted file mode 100755 index 364d8b2d66b..00000000000 --- a/tests/bugs/bug-1090042.t +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../snapshot.rc - -cleanup; - -TEST init_n_bricks 3; -TEST setup_lvm 3; -TEST glusterd; - -TEST $CLI volume create $V0 replica 3 $H0:$L1 $H0:$L2 $H0:$L3; -TEST $CLI volume start $V0; - -TEST kill_brick $V0 $H0 $L1; - -#Normal snap create should fail -TEST ! $CLI snapshot create ${V0}_snap1 $V0; -TEST ! snapshot_exists 0 ${V0}_snap1; - -#Force snap create should succeed -TEST $CLI snapshot create ${V0}_snap1 $V0 force; -TEST snapshot_exists 0 ${V0}_snap1; - -#Delete the created snap -TEST $CLI snapshot delete ${V0}_snap1; -TEST ! snapshot_exists 0 ${V0}_snap1; - -cleanup; diff --git a/tests/bugs/bug-1091935-brick-order-check-from-cli-to-glusterd.t b/tests/bugs/bug-1091935-brick-order-check-from-cli-to-glusterd.t deleted file mode 100755 index d5edabda806..00000000000 --- a/tests/bugs/bug-1091935-brick-order-check-from-cli-to-glusterd.t +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -cli1=$(echo $CLI | sed 's/ --wignore//') - -# Creating volume with non resolvable host name -TEST ! $cli1 volume create $V0 replica 2 $H0:$B0/${V0}0 redhat:$B0/${V0}1 \ - $H0:$B0/${V0}2 redhat:$B0/${V0}3 - -# Creating distribute-replica volume with bad brick order. It will fail -# due to bad brick order. -TEST ! $cli1 volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \ - $H0:$B0/${V0}2 $H0:$B0/${V0}3 - -# Now with force at the end of command it will bypass brick-order check -# for replicate or distribute-replicate volume. and it will create volume -TEST $cli1 volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \ - $H0:$B0/${V0}2 $H0:$B0/${V0}3 force - -cleanup; diff --git a/tests/bugs/bug-1092841.t b/tests/bugs/bug-1092841.t deleted file mode 100644 index 6740c318d9c..00000000000 --- a/tests/bugs/bug-1092841.t +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; - -TEST $CLI volume start $V0; - -TEST $CLI volume barrier $V0 enable; - -TEST ! $CLI volume barrier $V0 enable; - -TEST $CLI volume barrier $V0 disable; - -TEST ! $CLI volume barrier $V0 disable; - -cleanup diff --git a/tests/bugs/bug-1095097.t b/tests/bugs/bug-1095097.t deleted file mode 100755 index 9f9db7a3df5..00000000000 --- a/tests/bugs/bug-1095097.t +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -. 
$(dirname $0)/../include.rc - -cleanup; -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B1/brick1; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST $CLI volume profile $V0 start -TEST $CLI volume profile $V0 info -TEST $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick2 start -TEST $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick2 status - -cleanup; diff --git a/tests/bugs/bug-1099890.t b/tests/bugs/bug-1099890.t deleted file mode 100644 index c4be2cf56ba..00000000000 --- a/tests/bugs/bug-1099890.t +++ /dev/null @@ -1,125 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../dht.rc - -## TO-DO: Fix the following once the dht du refresh interval issue is fixed: -## 1. Do away with sleep(1). -## 2. Do away with creation of empty files. - -cleanup; - -TEST glusterd; -TEST pidof glusterd; - -# Create 2 loop devices, one per brick. -TEST truncate -s 100M $B0/brick1 -TEST truncate -s 100M $B0/brick2 - -TEST L1=`SETUP_LOOP $B0/brick1` -TEST MKFS_LOOP $L1 - -TEST L2=`SETUP_LOOP $B0/brick2` -TEST MKFS_LOOP $L2 - -TEST mkdir -p $B0/${V0}{1,2} - -TEST MOUNT_LOOP $L1 $B0/${V0}1 -TEST MOUNT_LOOP $L2 $B0/${V0}2 - -# Create a plain distribute volume with 2 subvols. -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; - -TEST $CLI volume start $V0; -EXPECT "Started" volinfo_field $V0 'Status'; - -TEST $CLI volume quota $V0 enable; - -TEST $CLI volume set $V0 features.quota-deem-statfs on - -TEST $CLI volume quota $V0 limit-usage / 150MB; - -TEST $CLI volume set $V0 cluster.min-free-disk 50% - -TEST glusterfs -s $H0 --volfile-id=$V0 $M0 - -# Make sure quota-deem-statfs is working as expected -EXPECT "150M" echo `df -h $M0 -P | tail -1 | awk {'print $2'}` - -# Create a new file 'foo' under the root of the volume, which hashes to subvol-0 -# of DHT, that consumes 40M -TEST dd if=/dev/zero of=$M0/foo bs=5120k count=8 - -TEST stat $B0/${V0}1/foo -TEST ! stat $B0/${V0}2/foo - -# Create a new file 'bar' under the root of the volume, which hashes to subvol-1 -# of DHT, that consumes 40M -TEST dd if=/dev/zero of=$M0/bar bs=5120k count=8 - -TEST ! stat $B0/${V0}1/bar -TEST stat $B0/${V0}2/bar - -# Touch a zero-byte file on the root of the volume to make sure the statfs data -# on DHT is refreshed -sleep 1; -TEST touch $M0/empty1; - -# At this point, the available space on each subvol {60M,60M} is greater than -# their min-free-disk {50M,50M}, but if this bug still exists, then -# the total available space on the volume as perceived by DHT should be less -# than min-free-disk, i.e., -# -# consumed space returned per subvol by quota = (40M + 40M) = 80M -# -# Therefore, consumed space per subvol computed by DHT WITHOUT the fix would be: -# (80M/150M)*100 = 53% -# -# Available space per subvol as perceived by DHT with the bug = 47% -# which is less than min-free-disk - -# Now I create a file that hashes to subvol-1 (counting from 0) of DHT. -# If this bug still exists,then DHT should be routing this creation to subvol-0. -# If this bug is fixed, then DHT should be routing the creation to subvol-1 only -# as it has more than min-free-disk space available. - -TEST dd if=/dev/zero of=$M0/file bs=1k count=1 -sleep 1; -TEST ! stat $B0/${V0}1/file -TEST stat $B0/${V0}2/file - -# Touch another zero-byte file on the root of the volume to refresh statfs -# values stored by DHT. 
- -TEST touch $M0/empty2; - -# Now I create a new file that hashes to subvol-0, at the end of which, there -# will be less than min-free-disk space available on it. -TEST dd if=/dev/zero of=$M0/fil bs=5120k count=4 -sleep 1; -TEST stat $B0/${V0}1/fil -TEST ! stat $B0/${V0}2/fil - -# Touch to refresh statfs info cached by DHT - -TEST touch $M0/empty3; - -# Now I create a file that hashes to subvol-0 but since it has less than -# min-free-disk space available, its data will be cached on subvol-1. - -TEST dd if=/dev/zero of=$M0/zz bs=5120k count=1 - -TEST stat $B0/${V0}1/zz -TEST stat $B0/${V0}2/zz - -EXPECT "$V0-client-1" dht_get_linkto_target "$B0/${V0}1/zz" - -EXPECT "1" is_dht_linkfile "$B0/${V0}1/zz" - -force_umount $M0 -$CLI volume stop $V0 -UMOUNT_LOOP ${B0}/${V0}{1,2} -rm -f ${B0}/brick{1,2} - -cleanup diff --git a/tests/bugs/bug-1100050.t b/tests/bugs/bug-1100050.t deleted file mode 100644 index 537591dcd82..00000000000 --- a/tests/bugs/bug-1100050.t +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd; -TEST pidof glusterd; - -TEST gluster volume create $V0 stripe 2 $H0:$B0/{1,2} force; -TEST gluster volume start $V0; -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; - -TEST gluster volume quota $V0 enable; - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" quotad_up_status; - -TEST mkdir $M0/dir; - -TEST gluster volume quota $V0 limit-usage /dir 10MB; - -TEST mkdir $M0/dir/subdir; - -cleanup; diff --git a/tests/bugs/bug-1101647.t b/tests/bugs/bug-1101647.t deleted file mode 100644 index ccfa7e2138b..00000000000 --- a/tests/bugs/bug-1101647.t +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../afr.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; -TEST $CLI volume start $V0; -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -EXPECT_WITHIN 20 "Y" glustershd_up_status - -#Create base entry in indices/xattrop and indices/base_indices_holder -echo "Data">$M0/file - -TEST $CLI volume heal $V0 -#Entries from indices/xattrop and indices/base_indices_holder should not be cleared after a heal. -EXPECT 1 count_sh_entries $B0/$V0"1" -EXPECT 1 count_sh_entries $B0/$V0"2" - -TEST kill_brick $V0 $H0 $B0/${V0}2 -echo "More data">>$M0/file - -EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count|grep "Number of entries:"|head -n1|awk '{print $4}'` - -cleanup; diff --git a/tests/bugs/bug-1102656.t b/tests/bugs/bug-1102656.t deleted file mode 100644 index 18f2b93eb49..00000000000 --- a/tests/bugs/bug-1102656.t +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1 -TEST $CLI volume start $V0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; - -TEST $CLI volume top $V0 open -TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick -TEST $CLI volume top $V0 read - -TEST $CLI volume status -TEST $CLI volume stop $V0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Stopped' volinfo_field $V0 'Status'; -cleanup; diff --git a/tests/bugs/bug-1104642.t b/tests/bugs/bug-1104642.t deleted file mode 100644 index da56eb5db75..00000000000 --- a/tests/bugs/bug-1104642.t +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../cluster.rc - - -function get_value() -{ - local key=$1 - local var="CLI_$2" - - eval cli_index=\$$var - - $cli_index volume info | grep "^$key"\ - | sed 's/.*: //' -} - -cleanup - -TEST launch_cluster 2 - -TEST $CLI_1 peer probe $H2; -EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count - -TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1 -EXPECT "$V0" get_value 'Volume Name' 1 -EXPECT "Created" get_value 'Status' 1 - -TEST $CLI_1 volume start $V0 -EXPECT "Started" get_value 'Status' 1 - -#Bring down 2nd glusterd -TEST kill_glusterd 2 - -#set the volume all options from the 1st glusterd -TEST $CLI_1 volume set all cluster.server-quorum-ratio 80 - -#Bring back the 2nd glusterd -TEST $glusterd_2 - -#Verify whether the value has been synced -EXPECT '80' get_value 'cluster.server-quorum-ratio' 1 -EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count -EXPECT '80' get_value 'cluster.server-quorum-ratio' 2 - -cleanup; diff --git a/tests/bugs/bug-1104692.t b/tests/bugs/bug-1104692.t deleted file mode 100755 index 79ed32ce803..00000000000 --- a/tests/bugs/bug-1104692.t +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3 -TEST $CLI volume start $V0 - -TEST glusterfs -s $H0 --volfile-id $V0 $M0; -TEST mkdir -p $M0/limit_one/limit_two/limit_three $M0/limit_four \ - $M0/limit_one/limit_five - -TEST $CLI volume set $V0 server.root-squash on -TEST $CLI volume quota $V0 enable - -TEST $CLI volume quota $V0 limit-usage / 1GB -TEST $CLI volume quota $V0 limit-usage /limit_one 1GB -TEST $CLI volume quota $V0 limit-usage /limit_one/limit_two 1GB -TEST $CLI volume quota $V0 limit-usage /limit_one/limit_two/limit_three 1GB -TEST $CLI volume quota $V0 limit-usage /limit_four 1GB -TEST $CLI volume quota $V0 limit-usage /limit_one/limit_five 1GB - -#Cleanup -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 - -cleanup; diff --git a/tests/bugs/bug-1109741-auth-mgmt-handshake.t b/tests/bugs/bug-1109741-auth-mgmt-handshake.t deleted file mode 100644 index 42a8eb3ed82..00000000000 --- a/tests/bugs/bug-1109741-auth-mgmt-handshake.t +++ /dev/null @@ -1,50 +0,0 @@ -#! /bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../cluster.rc - -# The test will attempt to verify that management handshake requests to -# GlusterD are authenticated before being allowed to change a GlusterD's -# op-version -# -# 1. Launch 3 glusterds -# 2. Probe 2 of them to form a cluster. This should succeed. -# 3. Probe either of the first two GlusterD's from the 3rd GlusterD. This should fail. -# 4. a. Reduce the op-version of 3rd GlusterD and restart it. -# b. Probe either of the first two GlusterD's from the 3rd GlusterD. This should fail. -# 5. Check current op-version of first two GlusterDs. It shouldn't have changed. -# 6. Probe third GlusterD from the cluster. This should succeed. - - -cleanup - -TEST launch_cluster 3 - -TEST $CLI_1 peer probe $H2 - -TEST ! $CLI_3 peer probe $H1 - -GD1_WD=$($CLI_1 system getwd) -OP_VERS_ORIG=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2) - -TEST $CLI_3 system uuid get # Needed for glusterd.info to be created - -GD3_WD=$($CLI_3 system getwd) -TEST sed -rnie "'s/(operating-version=)\w+/\130600/gip'" ${GD3_WD}/glusterd.info - -TEST kill_glusterd 3 -TEST start_glusterd 3 - -TEST ! 
$CLI_3 peer probe $H1 - -OP_VERS_NEW=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2) -TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]] - -TEST $CLI_1 peer probe $H3 - -kill_node 1 -kill_node 2 -kill_node 3 - -cleanup; - diff --git a/tests/bugs/bug-1109770.t b/tests/bugs/bug-1109770.t deleted file mode 100644 index 03b929defda..00000000000 --- a/tests/bugs/bug-1109770.t +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../snapshot.rc -. $(dirname $0)/../fileio.rc -. $(dirname $0)/../nfs.rc - -cleanup; - -TEST init_n_bricks 3; -TEST setup_lvm 3; - -TEST glusterd; - -TEST pidof glusterd; - -TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3; - -TEST $CLI volume start $V0; - -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; - -for i in {1..10} ; do echo "file" > $M0/file$i ; done - -TEST $CLI snapshot create snap1 $V0; - -for i in {11..20} ; do echo "file" > $M0/file$i ; done - -TEST $CLI snapshot create snap2 $V0; - -mkdir $M0/dir1; -mkdir $M0/dir2; - -for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done -for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done - -TEST $CLI snapshot create snap3 $V0; - -for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done -for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done - -TEST $CLI snapshot create snap4 $V0; - -TEST $CLI volume set $V0 features.uss enable; - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist - -TEST $CLI volume set $V0 features.uss disable; - -SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}'); - -TEST ! [ $SNAPD_PID -gt 0 ]; - -TEST $CLI volume set $V0 features.uss enable; - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist - -TEST $CLI volume stop $V0; - -SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}'); - -TEST ! [ $SNAPD_PID -gt 0 ]; - -cleanup ; diff --git a/tests/bugs/bug-1109889.t b/tests/bugs/bug-1109889.t deleted file mode 100644 index 05f65887308..00000000000 --- a/tests/bugs/bug-1109889.t +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../snapshot.rc -. $(dirname $0)/../fileio.rc -. 
$(dirname $0)/../nfs.rc - -cleanup; - -TEST init_n_bricks 3; -TEST setup_lvm 3; - -TEST glusterd; - -TEST pidof glusterd; - -TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3; - -TEST $CLI volume start $V0; - -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; - -MOUNT_PID=`ps ax |grep "glusterfs --volfile-sever $H0 --volfile-id=$V0 $M0" | grep -v grep | awk '{print $1}' | head -1` - -for i in {1..10} ; do echo "file" > $M0/file$i ; done - -TEST $CLI snapshot config activate-on-create enable - -TEST $CLI snapshot create snap1 $V0; - -for i in {11..20} ; do echo "file" > $M0/file$i ; done - -TEST $CLI snapshot create snap2 $V0; - -mkdir $M0/dir1; -mkdir $M0/dir2; - -for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done -for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done - -TEST $CLI snapshot create snap3 $V0; - -for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done -for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done - -TEST $CLI snapshot create snap4 $V0; - -TEST $CLI volume set $V0 features.uss enable; - -#let snapd get started properly and client connect to snapd -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0 - -SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}'); - -TEST [ $SNAPD_PID -gt 0 ]; - -TEST stat $M0/.snaps; - -kill -KILL $SNAPD_PID; - -# let snapd die properly -EXPECT_WITHIN $CHILD_UP_TIMEOUT "0" snap_client_connected_status $V0 - -TEST ! stat $M0/.snaps; - -TEST $CLI volume start $V0 force; - -# let client get the snapd port from glusterd and connect -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0 - -TEST stat $M0/.snaps; - -cleanup; diff --git a/tests/bugs/bug-1110917.t b/tests/bugs/bug-1110917.t deleted file mode 100644 index 4b67293a2c0..00000000000 --- a/tests/bugs/bug-1110917.t +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2; -TEST $CLI volume start $V0; - -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 - -TEST $CLI volume set $V0 changelog on -TEST $CLI volume set $V0 changelog.fsync-interval 1 - -# perform I/O on the background -f=$(basename `mktemp -t ${0##*/}.XXXXXX`) -dd if=/dev/urandom of=$M0/$f count=100000 bs=4k & - -# this is the best we can do without inducing _error points_ in the code -# without the patch reconfigre() would hang... -TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1` -TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1` - -TEST $CLI volume set $V0 changelog off -TEST $CLI volume set $V0 changelog on -TEST $CLI volume set $V0 changelog off -TEST $CLI volume set $V0 changelog on - -TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1` -TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1` - -# if there's a deadlock, this would hang -wait; - -cleanup; diff --git a/tests/bugs/bug-1111041.t b/tests/bugs/bug-1111041.t deleted file mode 100644 index ad8a2a73652..00000000000 --- a/tests/bugs/bug-1111041.t +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../fileio.rc -. 
$(dirname $0)/../nfs.rc - -cleanup; - -function is_snapd_running { - $CLI volume status $1 | grep "Snapshot Daemon" | wc -l; -} - -TEST glusterd; - -TEST pidof glusterd; - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 - -TEST $CLI volume start $V0; - -EXPECT "0" is_snapd_running $v0 - -TEST $CLI volume set $V0 features.uss enable; - -EXPECT "1" is_snapd_running $V0 - -SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}'); - -TEST [ $SNAPD_PID -gt 0 ]; - -SNAPD_PID2=$($CLI volume status $V0 | grep "Snapshot Daemon" | awk {'print $7'}); - -TEST [ $SNAPD_PID -eq $SNAPD_PID2 ] - -cleanup ; diff --git a/tests/bugs/bug-1111454.t b/tests/bugs/bug-1111454.t deleted file mode 100644 index 49f080d975d..00000000000 --- a/tests/bugs/bug-1111454.t +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#symlink resolution should succeed -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 stripe 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -TEST mkdir $M0/dir -TEST touch $M0/dir/file -TEST ln -s file $M0/dir/symlinkfile -TEST ls -lR $M0 -cleanup diff --git a/tests/bugs/bug-1111490.t b/tests/bugs/bug-1111490.t deleted file mode 100644 index d3138054746..00000000000 --- a/tests/bugs/bug-1111490.t +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 -TEST $CLI volume start $V0 - -# mount with auxillary gfid mount -TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 --aux-gfid-mount - -# create file with specific gfid -uuid=`uuidgen` -EXPECT "File creation OK" $PYTHON $(dirname $0)/../utils/gfid-access.py \ - $M0 ROOT file0 $uuid file 10 10 0644 - -# check gfid -EXPECT "$uuid" getfattr --only-values -n glusterfs.gfid.string $M0/file0 - -# unmount and mount again so as to start with a fresh inode table -# or use another mount... -TEST umount $M0 -TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 --aux-gfid-mount - -# touch the file again (gfid-access.py handles errno) -EXPECT "File creation OK" $PYTHON $(dirname $0)/../utils/gfid-access.py \ - $M0 ROOT file0 $uuid file 10 10 0644 - -cleanup; diff --git a/tests/bugs/bug-1111557.t b/tests/bugs/bug-1111557.t deleted file mode 100644 index 656b6e6519b..00000000000 --- a/tests/bugs/bug-1111557.t +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0}{0} -TEST $CLI volume set $V0 diagnostics.brick-log-buf-size 0 -TEST ! $CLI volume set $V0 diagnostics.brick-log-buf-size -0 -cleanup diff --git a/tests/bugs/bug-1112559.t b/tests/bugs/bug-1112559.t deleted file mode 100755 index 58cc40f7768..00000000000 --- a/tests/bugs/bug-1112559.t +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../cluster.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../snapshot.rc - -function check_peers { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} - -function check_snaps_status { - $CLI_1 snapshot status | grep 'Snap Name : ' | wc -l -} - -function check_snaps_bricks_health { - $CLI_1 snapshot status | grep 'Brick Running : Yes' | wc -l -} - - -SNAP_COMMAND_TIMEOUT=40 -NUMBER_OF_BRICKS=2 - -cleanup; -TEST verify_lvm_version -TEST launch_cluster 3 -TEST setup_lvm 3 - -TEST $CLI_1 peer probe $H2 -EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count - -TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2 - -TEST $CLI_1 volume start $V0 - -#Create snapshot and add a peer together -$CLI_1 snapshot create ${V0}_snap1 ${V0} & -PID_1=$! -$CLI_1 peer probe $H3 -wait $PID_1 - -#Snapshot should be created and in the snaplist -TEST snapshot_exists 1 ${V0}_snap1 - -#Not being paranoid! Just checking for the status of the snapshot -#During the testing of the bug the snapshot would list but actually -#not be created.Therefore check for health of the snapshot -EXPECT_WITHIN $SNAP_COMMAND_TIMEOUT 1 check_snaps_status - -#Disabling the checking of snap brick status , Will continue investigation -#on the failure of the snapbrick port bind issue. -#EXPECT_WITHIN $SNAP_COMMAND_TIMEOUT $NUMBER_OF_BRICKS check_snaps_bricks_health - -#check if the peer is added successfully -EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count - -TEST $CLI_1 snapshot delete ${V0}_snap1 - -cleanup; - - diff --git a/tests/bugs/bug-1112613.t b/tests/bugs/bug-1112613.t deleted file mode 100644 index 17302eaa427..00000000000 --- a/tests/bugs/bug-1112613.t +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../snapshot.rc -. $(dirname $0)/../cluster.rc - -cleanup; - -V1="patchy2" - -TEST verify_lvm_version; -TEST launch_cluster 2 -TEST setup_lvm 2 - -TEST $CLI_1 peer probe $H2 -EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count - -TEST $CLI_1 volume create $V0 $H1:$L1 -TEST $CLI_1 volume start $V0 -TEST $CLI_1 volume create $V1 $H2:$L2 -TEST $CLI_1 volume start $V1 - -# Create 3 snapshots for volume $V0 -snap_count=3 -snap_index=1 -TEST snap_create CLI_1 $V0 $snap_index $snap_count; - -# Create 3 snapshots for volume $V1 -snap_count=4 -snap_index=11 -TEST snap_create CLI_1 $V1 $snap_index $snap_count; - -EXPECT '3' get_snap_count CLI_1 $V0; -EXPECT '4' get_snap_count CLI_1 $V1; -EXPECT '7' get_snap_count CLI_1 - -TEST $CLI_1 snapshot delete volume $V0 -EXPECT '0' get_snap_count CLI_1 $V0; -EXPECT '4' get_snap_count CLI_1 $V1; -EXPECT '4' get_snap_count CLI_1 - -TEST $CLI_1 snapshot delete all -EXPECT '0' get_snap_count CLI_1 $V0; -EXPECT '0' get_snap_count CLI_1 $V1; -EXPECT '0' get_snap_count CLI_1 - -cleanup; - diff --git a/tests/bugs/bug-1113476.t b/tests/bugs/bug-1113476.t deleted file mode 100644 index 7cb9d908269..00000000000 --- a/tests/bugs/bug-1113476.t +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../snapshot.rc - -function volinfo_validate () -{ - local var=$1 - $CLI volume info $V0 | grep "^$var" | sed 's/.*: //' -} - -cleanup; - -TEST verify_lvm_version -TEST glusterd -TEST pidof glusterd -TEST setup_lvm 1 - -TEST $CLI volume create $V0 $H0:$L1 -TEST $CLI volume start $V0 - -EXPECT '' volinfo_validate 'snap-max-hard-limit' -EXPECT '' volinfo_validate 'snap-max-soft-limit' -EXPECT '' volinfo_validate 'auto-delete' - -TEST $CLI snapshot config snap-max-hard-limit 100 -EXPECT '100' volinfo_validate 'snap-max-hard-limit' -EXPECT '' volinfo_validate 'snap-max-soft-limit' -EXPECT '' volinfo_validate 'auto-delete' - -TEST $CLI snapshot config snap-max-soft-limit 50 -EXPECT '100' volinfo_validate 'snap-max-hard-limit' -EXPECT '50' volinfo_validate 'snap-max-soft-limit' -EXPECT '' volinfo_validate 'auto-delete' - -TEST $CLI snapshot config auto-delete enable -EXPECT '100' volinfo_validate 'snap-max-hard-limit' -EXPECT '50' volinfo_validate 'snap-max-soft-limit' -EXPECT 'enable' volinfo_validate 'auto-delete' - -cleanup; - - diff --git a/tests/bugs/bug-1113975.t b/tests/bugs/bug-1113975.t deleted file mode 100644 index 4b786d1502f..00000000000 --- a/tests/bugs/bug-1113975.t +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../snapshot.rc - -cleanup; - -TEST init_n_bricks 3; -TEST setup_lvm 3; - -TEST glusterd; - -TEST pidof glusterd; - -TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3; - -TEST $CLI volume start $V0; - -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; - -for i in {1..10} ; do echo "file" > $M0/file$i ; done - -TEST $CLI snapshot create snap1 $V0; - -for i in {11..20} ; do echo "file" > $M0/file$i ; done - -TEST $CLI snapshot create snap2 $V0; - -TEST $CLI volume stop $V0 - -TEST $CLI snapshot restore snap1; - -TEST $CLI snapshot restore snap2; - -TEST $CLI volume start $V0 - -cleanup ; diff --git a/tests/bugs/bug-1116503.t b/tests/bugs/bug-1116503.t deleted file mode 100644 index 9bad1cd9e09..00000000000 --- a/tests/bugs/bug-1116503.t +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -# -# Verify that mounting NFS over UDP (MOUNT service only) works. -# - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../nfs.rc - - -cleanup; -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/$V0 -TEST $CLI volume set $V0 nfs.mount-udp on - -TEST $CLI volume start $V0 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; - -TEST mount_nfs $H0:/$V0 $N0 nolock,mountproto=udp,proto=tcp; -TEST mkdir -p $N0/foo/bar -TEST ls $N0/foo -TEST ls $N0/foo/bar -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0/foo $N0 nolock,mountproto=udp,proto=tcp; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0/foo/bar $N0 nolock,mountproto=udp,proto=tcp; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -TEST $CLI volume set $V0 nfs.addr-namelookup on -TEST $CLI volume set $V0 nfs.rpc-auth-allow $H0 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0/foo/bar $N0 nolock,mountproto=udp,proto=tcp; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -TEST $CLI volume set $V0 nfs.rpc-auth-reject $H0 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST ! 
mount_nfs $H0:/$V0/foo/bar $N0 nolock,mountproto=udp,proto=tcp; - -cleanup; diff --git a/tests/bugs/bug-1117851.t b/tests/bugs/bug-1117851.t deleted file mode 100755 index 02e52f53787..00000000000 --- a/tests/bugs/bug-1117851.t +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -create_files () { - for i in {1..1000}; do - orig=$(printf %s/abc%04d $1 $i) - real=$(printf %s/src%04d $1 $i) - # Make sure lots of these have linkfiles. - echo "This is file $i" > $orig - mv $orig $real - done - sync -} - -move_files_inner () { - sfile=$M0/status_$(basename $1) - echo "running" > $sfile - for i in {1..1000}; do - src=$(printf %s/src%04d $1 $i) - dst=$(printf %s/dst%04d $1 $i) - mv $src $dst 2> /dev/null - done - echo "done" > $sfile -} - -move_files () { - move_files_inner $* & -} - -check_files () { - errors=0 - for i in {1..1000}; do - if [ ! -f $(printf %s/dst%04d $1 $i) ]; then - if [ -f $(printf %s/src%04d $1 $i) ]; then - echo "file $i didnt get moved" > /dev/stderr - else - echo "file $i is MISSING" > /dev/stderr - errors=$((errors+1)) - fi - fi - done - if [ $((errors)) != 0 ]; then - : ls -l $1 > /dev/stderr - fi - return $errors -} - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6}; - -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT '6' brick_count $V0 - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Mount FUSE with caching disabled (read-write) -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; - -TEST create_files $M0 - -## Mount FUSE with caching disabled (read-write) again -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M1; - -TEST move_files $M0 -TEST move_files $M1 - -# It's regrettable that renaming 1000 files might take more than 30 seconds, -# but on our test systems sometimes it does, so double the time from what we'd -# use otherwise. There still seem to be some spurious failures, 1 in 20 when -# this does not complete, added an additional 15 seconds to take false reports -# out of the system, during test runs. -EXPECT_WITHIN 75 "done" cat $M0/status_0 -EXPECT_WITHIN 75 "done" cat $M1/status_1 - -TEST umount $M0 -TEST umount $M1 -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; -TEST check_files $M0 - -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-1117951.t b/tests/bugs/bug-1117951.t deleted file mode 100644 index 47a0781def5..00000000000 --- a/tests/bugs/bug-1117951.t +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/brick -EXPECT 'Created' volinfo_field $V0 'Status'; -TEST $CLI volume start $V0 - -# Running with a locale not using '.' as decimal separator should work -export LC_NUMERIC=sv_SE.utf8 -TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -# As should a locale using '.' 
as a decimal separator -export LC_NUMERIC=C -TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -cleanup diff --git a/tests/bugs/bug-1119582.t b/tests/bugs/bug-1119582.t deleted file mode 100644 index cc388bde605..00000000000 --- a/tests/bugs/bug-1119582.t +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../fileio.rc -. $(dirname $0)/../nfs.rc - -cleanup; - -TEST glusterd; - -TEST pidof glusterd; - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 - -TEST $CLI volume set $V0 features.uss disable; - -TEST killall glusterd; - -rm -f $GLUSTERD_WORKDIR/vols/$V0/snapd.info - -TEST glusterd - -cleanup ; diff --git a/tests/bugs/bug-1120647.t b/tests/bugs/bug-1120647.t deleted file mode 100644 index 4670faa79d4..00000000000 --- a/tests/bugs/bug-1120647.t +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{1..4} -TEST $CLI volume start $V0 -TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} start -EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/brick{3..4}" -TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} commit -TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/brick2 force - -cleanup; diff --git a/tests/bugs/bug-1122443.t b/tests/bugs/bug-1122443.t deleted file mode 100644 index db9d3a060bf..00000000000 --- a/tests/bugs/bug-1122443.t +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../dht.rc - -make_files() { - mkdir $1 && \ - ln -s ../ $1/symlink && \ - mknod $1/special_b b 1 2 && \ - mknod $1/special_c c 3 4 && \ - mknod $1/special_u u 5 6 && \ - mknod $1/special_p p && \ - touch -h --date=@1 $1/symlink && \ - touch -h --date=@2 $1/special_b && - touch -h --date=@3 $1/special_c && - touch -h --date=@4 $1/special_u && - touch -h --date=@5 $1/special_p -} - -bug_1113050_workaround() { - # Test if graph change has settled (bug-1113050?) - test=$(stat -c "%n:%Y" $1 2>&1 | tr '\n' ',') - if [ $? -eq 0 ] ; then - echo RECONNECTED - else - echo WAITING - fi - return 0 -} - -cleanup - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 -TEST $CLI volume start $V0 - -# Mount FUSE and create symlink -TEST glusterfs -s $H0 --volfile-id $V0 $M0 -TEST make_files $M0/subdir - -# Get mtime before migration -BEFORE="$(stat -c %n:%Y $M0/subdir/* | tr '\n' ',')" - -# Migrate brick -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1 -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}0 start -EXPECT_WITHIN $REBALANCE_TIMEOUT "0" remove_brick_completed -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}0 commit - -# Get mtime after migration -EXPECT_WITHIN 5 RECONNECTED bug_1113050_workaround $M0/subdir/* -AFTER="$(stat -c %n:%Y $M0/subdir/* | tr '\n' ',')" - -# Check if mtime is unchanged -TEST [ "$AFTER" == "$BEFORE" ] - -cleanup diff --git a/tests/bugs/bug-1125824.t b/tests/bugs/bug-1125824.t deleted file mode 100755 index fb4fb00cf88..00000000000 --- a/tests/bugs/bug-1125824.t +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../nfs.rc - -create_files () { - for i in {1..10}; do - orig=$(printf %s/file%04d $1 $i) - echo "This is file $i" > $orig - done - for i in {1..10}; do - mkdir $(printf %s/dir%04d $1 $i) - done - sync -} - -create_dirs () { - for i in {1..10}; do - mkdir $(printf %s/dir%04d $1 $i) - create_files $(printf %s/dir%04d $1 $i) - done - sync -} - -stat_files () { - for i in {1..10}; do - orig=$(printf %s/file%04d $1 $i) - stat $orig - done - for i in {1..10}; do - stat $(printf %s/dir%04d $1 $i) - done - sync -} - -stat_dirs () { - for i in {1..10}; do - stat $(printf %s/dir%04d $1 $i) - stat_files $(printf %s/dir%04d $1 $i) - done - sync -} - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; - -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT '4' brick_count $V0 - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 - -# Create and poulate the NFS inode tables -TEST create_dirs $N0 -TEST stat_dirs $N0 - -# add-bricks changing the state of the volume where some bricks -# would have some directories and others would not -TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}{5,6,7,8} - -# Post this dht_access was creating a mess for directories which is fixed -# with this commit. The issues could range from getting ENOENT or -# ESTALE or entries missing to directories not having complete -# layouts. -TEST cd $N0 -TEST ls -lR - -TEST $CLI volume rebalance $V0 start force -EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 - -# tests to check post rebalance if layouts and entires are fine and -# accessible by NFS to clear the volume -TEST ls -lR -rm -rf ./* -# There are additional bugs where NFS+DHT does not delete all entries -# on an rm -rf, so we do an additional rm -rf to ensure all is done -# and we are facing this transient issue, rather than a bad directory -# layout that is cached in memory -TEST rm -rf ./* - -# Get out of the mount, so that umount can work -TEST cd / - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-1126048.c b/tests/bugs/bug-1126048.c deleted file mode 100644 index 2282bb2025e..00000000000 --- a/tests/bugs/bug-1126048.c +++ /dev/null @@ -1,37 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -/* - * This function opens a file and to trigger migration failure, unlinks the - * file and performs graph switch (cmd passed in argv). If everything goes fine, - * fsync should fail without crashing the mount process. 
- */ -int -main (int argc, char **argv) -{ - int ret = 0; - int fd = 0; - char *cmd = argv[1]; - - printf ("cmd is: %s\n", cmd); - fd = open("a.txt", O_CREAT|O_RDWR); - if (fd < 0) - printf ("open failed: %s\n", strerror(errno)); - - ret = unlink("a.txt"); - if (ret < 0) - printf ("unlink failed: %s\n", strerror(errno)); - if (write (fd, "abc", 3) < 0) - printf ("Not able to print %s\n", strerror (errno)); - system(cmd); - sleep(1); //No way to confirm graph switch so sleep 1 - ret = fsync(fd); - if (ret < 0) - printf ("Not able to fsync %s\n", strerror (errno)); - return 0; -} diff --git a/tests/bugs/bug-1126048.t b/tests/bugs/bug-1126048.t deleted file mode 100755 index 53c8ced4f96..00000000000 --- a/tests/bugs/bug-1126048.t +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -function grep_for_ebadf { - $M0/bug-1126048 "gluster --mode=script --wignore volume add-brick $V0 $H0:$B0/brick2" | grep -i "Bad file descriptor" -} -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/brick1; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST glusterfs -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=yes - -build_tester $(dirname $0)/bug-1126048.c - -TEST cp $(dirname $0)/bug-1126048 $M0 -cd $M0 -TEST grep_for_ebadf -TEST ls -l $M0 -cd - -TEST rm -f $(dirname $0)/bug-1126048 -cleanup; diff --git a/tests/bugs/bug-1130892.t b/tests/bugs/bug-1130892.t deleted file mode 100644 index 206778f2938..00000000000 --- a/tests/bugs/bug-1130892.t +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../afr.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -# Create a 1X2 replica -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} -EXPECT 'Created' volinfo_field $V0 'Status'; - -# Disable self-heal daemon -TEST gluster volume set $V0 self-heal-daemon off - -# Disable all perf-xlators -TEST $CLI volume set $V0 performance.quick-read off -TEST $CLI volume set $V0 performance.io-cache off -TEST $CLI volume set $V0 performance.write-behind off -TEST $CLI volume set $V0 performance.stat-prefetch off -TEST $CLI volume set $V0 performance.read-ahead off - -# Volume start -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -# FUSE Mount -TEST glusterfs -s $H0 --volfile-id $V0 $M0 - -# Create files and dirs -TEST mkdir -p $M0/one/two/ -TEST `echo "Carpe diem" > $M0/one/two/three` - -# Simulate disk-replacement -TEST kill_brick $V0 $H0 $B0/${V0}-1 -TEST rm -rf $B0/${V0}-1/one -TEST rm -rf $B0/${V0}-1/.glusterfs - -# Start force -TEST $CLI volume start $V0 force - -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 - -TEST stat $M0/one - -# Check pending xattrs -EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data -EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 entry -EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata - -TEST gluster volume set $V0 self-heal-daemon on -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one -EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two -EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two/three - -cleanup; 
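# The AFR tests just above and below (bug-1130892.t, bug-1132102.t) inspect
# per-client changelog xattrs through helpers sourced from tests/afr.rc.
# What follows is a minimal illustrative sketch of that idea, not the helper
# shipped in afr.rc: it assumes the trusted.afr.* value is a 12-byte counter
# block laid out as <data><metadata><entry> (4 bytes each), and the function
# name, example paths and variable names are hypothetical.
function changelog_field_sketch () {
        local path=$1 xattr=$2 field=$3 hexval
        # getfattr -e hex prints lines such as:
        #   trusted.afr.patchy-client-1=0x000000010000000200000000
        hexval=$(getfattr -n "$xattr" -e hex "$path" 2>/dev/null | \
                 sed -n "s/^${xattr}=0x//p")
        case "$field" in
                data)     echo "${hexval:0:8}"  ;;
                metadata) echo "${hexval:8:8}"  ;;
                entry)    echo "${hexval:16:8}" ;;
        esac
}
# Hypothetical usage: a non-zero field indicates a pending self-heal of that
# type against the named client.
#   changelog_field_sketch /d/backends/patchy-0/one trusted.afr.patchy-client-1 entry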
diff --git a/tests/bugs/bug-1132102.t b/tests/bugs/bug-1132102.t deleted file mode 100644 index 4e54b1d24dd..00000000000 --- a/tests/bugs/bug-1132102.t +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -#This tests that mknod and create fops mark necessary pending changelog -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -TEST kill_brick $V0 $H0 $B0/${V0}0 -cd $M0 -TEST mkfifo fifo -TEST mknod block b 0 0 -TEST touch a -EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 data -EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 entry -EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 metadata -EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 data -EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 entry -EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 metadata -EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 data -EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 entry -EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 metadata -cleanup diff --git a/tests/bugs/bug-1134691-afr-lookup-metadata-heal.t b/tests/bugs/bug-1134691-afr-lookup-metadata-heal.t deleted file mode 100644 index 1fb1732a33f..00000000000 --- a/tests/bugs/bug-1134691-afr-lookup-metadata-heal.t +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -#### Test iatt and user xattr heal from lookup code path #### - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2} -TEST $CLI volume start $V0 -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 - -cd $M0 -TEST touch file -TEST setfattr -n user.attribute1 -v "value" $B0/brick0/file -TEST kill_brick $V0 $H0 $B0/brick2 -TEST chmod +x file -iatt=$(stat -c "%g:%u:%A" file) - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2 - -#Trigger metadataheal -TEST stat file - -#iattrs must be matching -iatt1=$(stat -c "%g:%u:%A" $B0/brick0/file) -iatt2=$(stat -c "%g:%u:%A" $B0/brick1/file) -iatt3=$(stat -c "%g:%u:%A" $B0/brick2/file) -EXPECT $iatt echo $iatt1 -EXPECT $iatt echo $iatt2 -EXPECT $iatt echo $iatt3 - -#xattrs must be matching -xatt1_cnt=$(getfattr -d $B0/brick0/file|wc|awk '{print $1}') -xatt2_cnt=$(getfattr -d $B0/brick1/file|wc|awk '{print $1}') -xatt3_cnt=$(getfattr -d $B0/brick2/file|wc|awk '{print $1}') -EXPECT "$xatt1_cnt" echo $xatt2_cnt -EXPECT "$xatt1_cnt" echo $xatt3_cnt - -#changelogs must be zero -xattr1=$(get_hex_xattr trusted.afr.$V0-client-2 $B0/brick0/file) -xattr2=$(get_hex_xattr trusted.afr.$V0-client-2 $B0/brick1/file) -EXPECT "000000000000000000000000" echo $xattr1 -EXPECT "000000000000000000000000" echo $xattr2 - -cd - -cleanup; diff --git a/tests/bugs/bug-1135514-allow-setxattr-with-null-value.t b/tests/bugs/bug-1135514-allow-setxattr-with-null-value.t deleted file mode 100644 index cea0566b9bb..00000000000 --- a/tests/bugs/bug-1135514-allow-setxattr-with-null-value.t +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#Test -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0}0 -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -TEST touch $M0/file -TEST setfattr -n user.attribute1 $M0/file -TEST getfattr -n user.attribute1 $M0/file -cleanup - diff --git a/tests/bugs/bug-1139230.t b/tests/bugs/bug-1139230.t deleted file mode 100644 index 24317dd5f27..00000000000 --- a/tests/bugs/bug-1139230.t +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../afr.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -# Create a 1X2 replica -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} -EXPECT 'Created' volinfo_field $V0 'Status'; - -# Volume start -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -# FUSE Mount -TEST glusterfs -s $H0 --volfile-id $V0 $M0 - -TEST mkdir -p $M0/one - -# Kill a brick -TEST kill_brick $V0 $H0 $B0/${V0}-1 - -TEST `echo "A long" > $M0/one/two` - -# Start force -TEST $CLI volume start $V0 force - -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 - -EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one -EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two - -# Pending xattrs should be set for all the bricks once self-heal is done -# Check pending xattrs -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-0 -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.$V0-client-0 -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.$V0-client-1 -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.dirty -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.dirty - -TEST `echo "time ago" > $M0/one/three` - -# Pending xattrs should be set for all the bricks once transaction is done -# Check pending xattrs -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.$V0-client-0 -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.$V0-client-1 -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.$V0-client-0 -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.$V0-client-1 -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.dirty -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.dirty - -cleanup; diff --git a/tests/bugs/bug-1140162-file-snapshot-and-features-encryption-option-validation.t b/tests/bugs/bug-1140162-file-snapshot-and-features-encryption-option-validation.t deleted file mode 100644 index a7aa883cba8..00000000000 --- a/tests/bugs/bug-1140162-file-snapshot-and-features-encryption-option-validation.t +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -## Test case for BZ-1140160 Volume option set and -## command input should validate correctly. - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start glusterd -TEST glusterd; -TEST pidof glusterd; - -## Lets create and start volume -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; -TEST $CLI volume start $V0 - -## Set features.file-snapshot and features.encryption option with non-boolean -## value. These options should fail. -TEST ! $CLI volume set $V0 features.file-snapshot abcd -TEST ! $CLI volume set $V0 features.encryption redhat - -## Set other options with valid value. These options should succeed. -TEST $CLI volume set $V0 barrier enable -TEST $CLI volume set $V0 ping-timeout 60 - -## Set features.file-snapshot and features.encryption option with valid boolean -## value. These options should succeed. 
-TEST $CLI volume set $V0 features.file-snapshot on -TEST $CLI volume set $V0 features.encryption on - -cleanup; diff --git a/tests/bugs/bug-1155042-dont-display-deactivated-snapshots.t b/tests/bugs/bug-1155042-dont-display-deactivated-snapshots.t deleted file mode 100644 index 63ad3d754d8..00000000000 --- a/tests/bugs/bug-1155042-dont-display-deactivated-snapshots.t +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../snapshot.rc - -cleanup; - -TEST init_n_bricks 2 -TEST setup_lvm 2 -TEST glusterd; - -TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 -TEST $CLI volume start $V0 - -# enable uss and mount the volume -TEST $CLI volume set $V0 features.uss enable -TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0 - -# create 10 snapshots and check if all are being reflected -# in the USS world -gluster snapshot config activate-on-create enable -for i in {1..10}; do $CLI snapshot create snap$i $V0; done -EXPECT 10 uss_count_snap_displayed $M0 - -# snapshots should not be displayed after deactivation -for i in {1..10}; do $CLI snapshot deactivate snap$i --mode=script; done -EXPECT 0 uss_count_snap_displayed $M0 - -# activate all the snapshots and check if all the activated snapshots -# are displayed again -for i in {1..10}; do $CLI snapshot activate snap$i --mode=script; done -EXPECT 10 uss_count_snap_displayed $M0 - -cleanup; - diff --git a/tests/bugs/bug-1157223-symlink-mounting.t b/tests/bugs/bug-1157223-symlink-mounting.t deleted file mode 100644 index 4ebc3453889..00000000000 --- a/tests/bugs/bug-1157223-symlink-mounting.t +++ /dev/null @@ -1,124 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc - -cleanup; - -## Start and create a volume -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume info; -TEST $CLI volume create $V0 $H0:$B0/$V0 - -TEST $CLI volume start $V0; - -## Wait for volume to register with rpc.mountd -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; - -## Mount NFS -TEST mount_nfs $H0:/$V0 $N0 nolock; - -mkdir $N0/dir1; -mkdir $N0/dir2; -pushd $N0/ ; - -##link created using relative path -ln -s dir1 symlink1; - -##relative path contains ".." -ln -s ../dir1 dir2/symlink2; - -##link created using absolute path -ln -s $N0/dir1 symlink3; - -##link pointing to another symlinks -ln -s symlink1 symlink4 -ln -s symlink3 symlink5 - -##dead links -ln -s does/not/exist symlink6 - -##link which contains ".." points out of glusterfs -ln -s ../../ symlink7 - -##links pointing to unauthorized area -ln -s .glusterfs symlink8 - -popd ; - -##Umount the volume -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Mount and umount NFS via directory -TEST mount_nfs $H0:/$V0/dir1 $N0 nolock; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Mount and umount NFS via symlink1 -TEST mount_nfs $H0:/$V0/symlink1 $N0 nolock; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Mount and umount NFS via symlink2 -TEST mount_nfs $H0:/$V0/dir2/symlink2 $N0 nolock; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Mount NFS via symlink3 should fail -TEST ! mount_nfs $H0:/$V0/symlink3 $N0 nolock; - -## Mount and umount NFS via symlink4 -TEST mount_nfs $H0:/$V0/symlink4 $N0 nolock; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Mount NFS via symlink5 should fail -TEST ! mount_nfs $H0:/$V0/symlink5 $N0 nolock; - -## Mount NFS via symlink6 should fail -TEST ! mount_nfs $H0:/$V0/symlink6 $N0 nolock; - -## Mount NFS via symlink7 should fail -TEST ! 
mount_nfs $H0:/$V0/symlink7 $N0 nolock; - -## Mount NFS via symlink8 should fail -TEST ! mount_nfs $H0:/$V0/symlink8 $N0 nolock; - -##Similar check for udp mount -$CLI volume stop $V0 -TEST $CLI volume set $V0 nfs.mount-udp on -$CLI volume start $V0 - -## Wait for volume to register with rpc.mountd -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; - -## Mount and umount NFS via directory -TEST mount_nfs $H0:/$V0/dir1 $N0 nolock,mountproto=udp,proto=tcp; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Mount and umount NFS via symlink1 -TEST mount_nfs $H0:/$V0/symlink1 $N0 nolock,mountproto=udp,proto=tcp; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Mount and umount NFS via symlink2 -TEST mount_nfs $H0:/$V0/dir2/symlink2 $N0 nolock,mountproto=udp,proto=tcp; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Mount NFS via symlink3 should fail -TEST ! mount_nfs $H0:/$V0/symlink3 $N0 nolock,mountproto=udp,proto=tcp; - -## Mount and umount NFS via symlink4 -TEST mount_nfs $H0:/$V0/symlink4 $N0 nolock,mountproto=udp,proto=tcp; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Mount NFS via symlink5 should fail -TEST ! mount_nfs $H0:/$V0/symlink5 $N0 nolock,mountproto=udp,proto=tcp; - -## Mount NFS via symlink6 should fail -TEST ! mount_nfs $H0:/$V0/symlink6 $N0 nolock,mountproto=udp,proto=tcp; - -##symlink7 is not check here, because in udp mount ../../ resolves into root '/' - -## Mount NFS via symlink8 should fail -TEST ! mount_nfs $H0:/$V0/symlink8 $N0 nolock,mountproto=udp,proto=tcp; - -rm -rf $H0:$B0/ -cleanup; diff --git a/tests/bugs/bug-1157991.t b/tests/bugs/bug-1157991.t deleted file mode 100755 index 97d081052e8..00000000000 --- a/tests/bugs/bug-1157991.t +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../snapshot.rc - -cleanup; -TEST verify_lvm_version; -TEST glusterd; -TEST pidof glusterd; - -TEST setup_lvm 1 - -TEST $CLI volume create $V0 $H0:$L1 -TEST $CLI volume start $V0 - -TEST $CLI snapshot create snap1 $V0 -EXPECT 'Stopped' snapshot_status snap1; - -TEST $CLI snapshot config activate-on-create enable -TEST $CLI snapshot create snap2 $V0 -EXPECT 'Started' snapshot_status snap2; - -#Clean up -TEST $CLI snapshot delete snap1 -TEST $CLI snapshot delete snap2 - -TEST $CLI volume stop $V0 force -TEST $CLI volume delete $V0 - -cleanup; diff --git a/tests/bugs/bug-1161092-nfs-acls.t b/tests/bugs/bug-1161092-nfs-acls.t deleted file mode 100644 index f64ae5b3c18..00000000000 --- a/tests/bugs/bug-1161092-nfs-acls.t +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info - -TEST $CLI volume create $V0 $H0:$B0/brick1; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available -TEST mount_nfs $H0:/$V0 $N0 - -TEST touch $N0/file1 -TEST chmod 700 $N0/file1 -TEST getfacl $N0/file1 - -TEST $CLI volume set $V0 root-squash on -TEST getfacl $N0/file1 - -TEST umount_nfs $H0:/$V0 $N0 -TEST mount_nfs $H0:/$V0 $N0 -TEST getfacl $N0/file1 - -## Before killing daemon to avoid deadlocks -umount_nfs $N0 - -cleanup; - diff --git a/tests/bugs/bug-1161156.t b/tests/bugs/bug-1161156.t deleted file mode 100755 index 9f33391d744..00000000000 --- a/tests/bugs/bug-1161156.t +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../nfs.rc - -function usage() -{ - local QUOTA_PATH=$1; - $CLI volume quota $V0 list $QUOTA_PATH | \ - grep "$QUOTA_PATH" | awk '{print $4}' -} - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6}; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -# Testing with NFS for no particular reason -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available -TEST mount_nfs $H0:/$V0 $N0 -mydir="dir" -TEST mkdir -p $N0/$mydir -TEST mkdir -p $N0/newdir - -TEST dd if=/dev/zero of=$N0/$mydir/file bs=1k count=10240 - -TEST $CLI volume quota $V0 enable -TEST $CLI volume quota $V0 limit-usage / 20MB -TEST $CLI volume quota $V0 limit-usage /newdir 5MB -TEST $CLI volume quota $V0 soft-timeout 0 -TEST $CLI volume quota $V0 hard-timeout 0 - -TEST dd if=/dev/zero of=$N0/$mydir/newfile_1 bs=512 count=10240 -# wait for write behind to complete. -EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "15.0MB" usage "/" -TEST ! dd if=/dev/zero of=$N0/$mydir/newfile_2 bs=1k count=10240 - -# Test rename within a directory. It should pass even when the -# corresponding directory quota is filled. -TEST mv $N0/dir/file $N0/dir/newfile_3 - -# rename should fail here with disk quota exceeded -TEST ! mv $N0/dir/newfile_3 $N0/newdir/ - -# cleanup -umount_nfs $N0 -cleanup; diff --git a/tests/bugs/bug-1162462.t b/tests/bugs/bug-1162462.t deleted file mode 100755 index 30b9dc30250..00000000000 --- a/tests/bugs/bug-1162462.t +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../snapshot.rc - -cleanup; - -TEST init_n_bricks 3; -TEST setup_lvm 3; -TEST glusterd; -TEST pidof glusterd; - -TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3; -TEST $CLI volume start $V0; -TEST $CLI volume set $V0 features.uss enable; -TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0; - -mkdir $M0/test -echo "file1" > $M0/file1 -ln -s $M0/file1 $M0/test/file_symlink -ls -l $M0/ > /dev/null -ls -l $M0/test/ > /dev/null - -TEST $CLI snapshot create snap1 $V0; -$CLI snapshot activate snap1; -EXPECT 'Started' snapshot_status snap1; - -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0 -ls $M0/.snaps/snap1/test/ > /dev/null -ls -l $M0/.snaps/snap1/test/ > /dev/null -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0 - -TEST $CLI snapshot delete snap1; -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-1162498.t b/tests/bugs/bug-1162498.t deleted file mode 100644 index d01999ea7ac..00000000000 --- a/tests/bugs/bug-1162498.t +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../snapshot.rc - -cleanup; -TEST verify_lvm_version; -TEST glusterd; -TEST pidof glusterd; - -TEST setup_lvm 1 - -TEST $CLI volume create $V0 $H0:$L1 -TEST $CLI volume start $V0 - -TEST $CLI snapshot config activate-on-create enable -TEST $CLI volume set $V0 features.uss enable - -TEST glusterfs -s $H0 --volfile-id=$V0 $M0 - -TEST mkdir $M0/xyz - -TEST $CLI snapshot create snap1 $V0 -TEST $CLI snapshot create snap2 $V0 - -TEST rmdir $M0/xyz - -TEST $CLI snapshot create snap3 $V0 -TEST $CLI snapshot create snap4 $V0 - -TEST mkdir $M0/xyz -TEST ls $M0/xyz/.snaps/ - -TEST $CLI volume stop $V0 -TEST $CLI snapshot restore snap2 -TEST $CLI volume start $V0 - -umount -f $M0 -TEST glusterfs -s $H0 --volfile-id=$V0 $M0 - -#Dir xyz exists in snap1 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/xyz - -TEST ls $M0/xyz/.snaps/ -TEST mkdir $M0/abc -TEST ls $M0/abc/.snaps/ - -#Clean up -TEST $CLI snapshot delete snap1 -TEST $CLI snapshot delete snap3 -TEST $CLI snapshot delete snap4 -TEST $CLI volume stop $V0 force -TEST $CLI volume delete $V0 - -cleanup; - diff --git a/tests/bugs/bug-1164613.t b/tests/bugs/bug-1164613.t deleted file mode 100644 index b7b27f1d649..00000000000 --- a/tests/bugs/bug-1164613.t +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../snapshot.rc - -cleanup; -TEST verify_lvm_version; -TEST glusterd; -TEST pidof glusterd; - -TEST setup_lvm 1 - -TEST $CLI volume create $V0 $H0:$L1 -TEST $CLI volume start $V0 -TEST glusterfs -s $H0 --volfile-id=$V0 $M0 - -TEST touch $M0/testfile - -TEST $CLI snapshot create snaps $V0 -TEST $CLI snapshot activate snaps -TEST $CLI volume set $V0 features.uss enable -TEST $CLI volume set $V0 snapshot-directory snaps - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/snaps/snaps/testfile - -umount -f $M0 - -#Clean up -TEST $CLI snapshot delete snaps -TEST $CLI volume stop $V0 force -TEST $CLI volume delete $V0 - -cleanup; - diff --git a/tests/bugs/bug-1166197.t b/tests/bugs/bug-1166197.t deleted file mode 100755 index a1e2480d62b..00000000000 --- a/tests/bugs/bug-1166197.t +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../snapshot.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../nfs.rc - -cleanup; -CURDIR=`pwd` - -TEST verify_lvm_version; -TEST glusterd; -TEST pidof glusterd; - -TEST setup_lvm 1 - -TEST $CLI volume create $V0 $H0:$L1 -TEST $CLI volume start $V0 -TEST $CLI snapshot config activate-on-create enable -TEST $CLI volume set $V0 features.uss enable - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; -TEST mount_nfs $H0:/$V0 $N0 nolock -TEST mkdir $N0/testdir - -TEST $CLI snapshot create snap1 $V0 -TEST $CLI snapshot create snap2 $V0 - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/testdir/.snaps - -TEST cd $N0/testdir -TEST cd .snaps -TEST ls - -TEST $CLI snapshot deactivate snap2 -TEST ls - -TEST cd $CURDIR - -#Clean up -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 -TEST $CLI snapshot delete snap1 -TEST $CLI snapshot delete snap2 -TEST $CLI volume stop $V0 force -TEST $CLI volume delete $V0 - -cleanup; - diff --git a/tests/bugs/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t b/tests/bugs/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t deleted file mode 100644 index 1eb3d55e36c..00000000000 --- a/tests/bugs/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t +++ /dev/null @@ -1,201 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../snapshot.rc - -# This function returns a value "Y" if user can execute -# the given command. Else it will return "N" -# @arg-1 : Name of the user -# @arg-2 : Path of the file -# @arg-3 : command to be executed -function check_if_permitted () { - local usr=$1 - local path=$2 - local cmd=$3 - local var - local ret - var=$(su - $usr -c "$cmd $path") - ret=$? - - if [ "$cmd" == "cat" ] - then - if [ "$var" == "Test" ] - then - echo "Y" - else - echo "N" - fi - else - if [ "$ret" == "0" ] - then - echo "Y" - else - echo "N" - fi - fi -} - -# Create a directory in /tmp to specify which directory to make -# as home directory for user -home_dir=$(cat /dev/urandom | tr -dc 'a-zA-Z' | fold -w 8 | head -n 1) -home_dir="/tmp/bug-1167580-$home_dir" -mkdir $home_dir - -function get_new_user() { - local temp=$(cat /dev/urandom | tr -dc 'a-zA-Z' | fold -w 8 | head -n 1) - id $temp - if [ "$?" == "0" ] - then - get_new_user - else - echo $temp - fi -} - -function create_user() { - local user=$1 - local group=$2 - - if [ "$group" == "" ] - then - useradd -d $home_dir/$user $user - else - useradd -d $home_dir/$user -G $group $user - fi - - return $? -} - -cleanup; - -TEST setup_lvm 1 -TEST glusterd - -TEST $CLI volume create $V0 $H0:$L1 -TEST $CLI volume start $V0 - -# Mount the volume as both fuse and nfs mount -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available -TEST glusterfs -s $H0 --volfile-id $V0 $M0 -TEST mount_nfs $H0:/$V0 $N0 nolock - -# Create 2 user -user1=$(get_new_user) -create_user $user1 -user2=$(get_new_user) -create_user $user2 - -# create a file for which only user1 has access -echo "Test" > $M0/README -chown $user1 $M0/README -chmod 700 $M0/README - -# enable uss and take a snapshot -TEST $CLI volume set $V0 uss enable -TEST $CLI snapshot config activate-on-create on -TEST $CLI snapshot create snap1 $V0 - -# try to access the file using user1 account. -# It should succeed with both normal mount and snapshot world. -# There is time delay in which snapd might not have got the notification -# from glusterd about snapshot create hence using "EXPECT_WITHIN" -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $M0/README cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $N0/README cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $M0/.snaps/snap1/README cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $N0/.snaps/snap1/README cat - - -# try to access the file using user2 account -# It should fail from both normal mount and snapshot world -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $M0/README cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $N0/README cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $M0/.snaps/snap1/README cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $N0/.snaps/snap1/README cat - -# We need to test another scenario where user belonging to one group -# tries to access files from user belonging to another group -# instead of using the already created users and making the test case look complex -# I thought of using two different users. - -# The test case written below does the following things -# 1) Create 2 users (user{3,4}), belonging to 2 different groups (group{3,4}) -# 2) Take a snapshot "snap2" -# 3) Create a file for which only users belonging to group3 have -# permission to read -# 4) Test various combinations of Read-Write, Fuse-NFS mount, User{3,4,5} -# from both normal mount, and USS world. 
- -echo "Test" > $M0/file3 - -chmod 740 $M0/file3 - -group3=$(get_new_user) -groupadd $group3 - -group4=$(get_new_user) -groupadd $group4 - -user3=$(get_new_user) -create_user $user3 $group3 - -user4=$(get_new_user) -create_user $user4 $group4 - -user5=$(get_new_user) -create_user $user5 - -chgrp $group3 $M0/file3 - -TEST $CLI snapshot create snap2 $V0 - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $M0/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $M0/.snaps/snap2/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $M0/file3 "echo Hello >" -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $M0/.snaps/snap2/file3 "echo Hello >" -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $N0/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $N0/.snaps/snap2/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $N0/file3 "echo Hello >" -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $N0/.snaps/snap2/file3 "echo Hello >" - - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/.snaps/snap2/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/file3 "echo Hello >" -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/.snaps/snap2/file3 "echo Hello >" -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/.snaps/snap2/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/file3 "echo Hello >" -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/.snaps/snap2/file3 "echo Hello >" - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $M0/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $M0/.snaps/snap2/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $M0/file3 "echo Hello >" -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $M0/.snaps/snap2/file3 "echo Hello >" -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/.snaps/snap2/file3 cat -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/file3 "echo Hello >" -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/.snaps/snap2/file3 "echo Hello >" - -# cleanup -/usr/sbin/userdel -f -r $user1 -/usr/sbin/userdel -f -r $user2 -/usr/sbin/userdel -f -r $user3 -/usr/sbin/userdel -f -r $user4 -/usr/sbin/userdel -f -r $user5 - -#cleanup all the home directory which is created as part of this test case -if [ -d "$home_dir" ] -then - rm -rf $home_dir -fi - - -groupdel $group3 -groupdel $group4 - -TEST $CLI snapshot delete all - -cleanup; - - diff --git a/tests/bugs/bug-1168803-snapd-option-validation-fix.t b/tests/bugs/bug-1168803-snapd-option-validation-fix.t deleted file mode 100755 index e29cbe43db6..00000000000 --- a/tests/bugs/bug-1168803-snapd-option-validation-fix.t +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -## Test case for BZ-1168803 - snapd option validation should not fail if the -#snapd is not running - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; - -## Start glusterd -TEST glusterd; -TEST pidof glusterd; - -## create volume -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; -TEST $CLI volume set $V0 features.uss enable - -## Now set another volume option, this should not fail -TEST $CLI volume set $V0 features.file-snapshot on - -## start the volume -TEST $CLI volume start $V0 - -## Kill snapd daemon and then try to stop the volume which should not fail -kill $(ps aux | grep glusterfsd | grep snapd | awk '{print $2}') - -TEST $CLI volume stop $V0 - -cleanup; diff --git a/tests/bugs/bug-1168875.t b/tests/bugs/bug-1168875.t deleted file mode 100644 index 0a7476db87b..00000000000 --- a/tests/bugs/bug-1168875.t +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../snapshot.rc -. $(dirname $0)/../fileio.rc -. $(dirname $0)/../nfs.rc - -cleanup; - -function check_entry_point_exists () -{ - local entry_point=$1; - local _path=$2; - - ls -a $_path | grep $entry_point; - - if [ $? -eq 0 ]; then - echo 'Y'; - else - echo 'N'; - fi -} - -TEST init_n_bricks 3; -TEST setup_lvm 3; - -TEST glusterd; - -TEST pidof glusterd; - -TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3; - -TEST $CLI volume start $V0; - -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 --xlator-option *-snapview-client.snapdir-entry-path=/dir $M0; - -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $N0; -for i in {1..10} ; do echo "file" > $M0/file$i ; done - - -for i in {11..20} ; do echo "file" > $M0/file$i ; done - -mkdir $M0/dir; - -for i in {1..10} ; do echo "file" > $M0/dir/file$i ; done - -mkdir $M0/dir1; -mkdir $M0/dir2; - -for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done -for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done - -for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done -for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done - -TEST $CLI snapshot create snap1 $V0; -TEST $CLI snapshot activate snap1; - -TEST $CLI volume set $V0 features.uss enable; - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $M0/dir -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $N0/dir - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $M0/dir1 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $N0/dir1 - -TEST $CLI volume set $V0 features.show-snapshot-directory enable; - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir1 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir1 - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_entry_point_exists ".snaps" $M0/dir -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $M0/dir1 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir1 - -TEST $CLI volume set $V0 features.show-snapshot-directory disable; - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir1 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir1 - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $M0/dir -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir - -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' 
check_entry_point_exists ".snaps" $M0/dir1 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir1 - -cleanup; diff --git a/tests/bugs/bug-1173414-mgmt-v3-remote-lock-failure.t b/tests/bugs/bug-1173414-mgmt-v3-remote-lock-failure.t deleted file mode 100755 index adc3fe30dd4..00000000000 --- a/tests/bugs/bug-1173414-mgmt-v3-remote-lock-failure.t +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../cluster.rc - -function check_peers { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} - -cleanup; - -TEST launch_cluster 2; -TEST $CLI_1 peer probe $H2; - -EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers - -TEST $CLI_1 volume create $V0 $H1:$B1/$V0 -TEST $CLI_1 volume create $V1 $H1:$B1/$V1 -TEST $CLI_1 volume start $V0 -TEST $CLI_1 volume start $V1 - -for i in {1..20} -do - $CLI_1 volume set $V0 diagnostics.client-log-level DEBUG & - $CLI_1 volume set $V1 barrier on - $CLI_2 volume set $V0 diagnostics.client-log-level DEBUG & - $CLI_2 volume set $V1 barrier on -done - -EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers -TEST $CLI_1 volume status -TEST $CLI_2 volume status - -cleanup; diff --git a/tests/bugs/bug-1178079.t b/tests/bugs/bug-1178079.t deleted file mode 100644 index 3ee4f730f3b..00000000000 --- a/tests/bugs/bug-1178079.t +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -#Create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..2}; -TEST $CLI volume start $V0; - -TEST $CLI volume set $V0 features.uss on; - -TEST glusterfs -s $H0 --volfile-id $V0 $M0; - -TEST touch $M0/file; - -TEST getfattr -d -m . -e hex $M0/file; - -cleanup; diff --git a/tests/bugs/bug-762989.t b/tests/bugs/bug-762989.t deleted file mode 100755 index fb256717626..00000000000 --- a/tests/bugs/bug-762989.t +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -# Skip the entire test if ip_local_reserved_ports does not exist -if [ ! -f /proc/sys/net/ipv4/ip_local_reserved_ports ] ; then - echo "Skip test on /proc/sys/net/ipv4/ip_local_reserved_ports, "\ - "which does not exists on this system" >&2 - SKIP_TESTS - exit 0 -fi - -## reserve port 1023 -older_ports=$(cat /proc/sys/net/ipv4/ip_local_reserved_ports); -echo "1023" > /proc/sys/net/ipv4/ip_local_reserved_ports; - -## Start and create a volume -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -TEST $CLI volume start $V0; - -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \ -$M0; - -## Wait for volume to register with rpc.mountd -sleep 6; -## check if port 1023 (which has been reserved) is used by the gluster processes -op=$(netstat -ntp | grep gluster | grep -w 1023); -EXPECT "" echo $op; - -#set the reserved ports to the older values -echo $older_ports > /proc/sys/net/ipv4/ip_local_reserved_ports - -cleanup; diff --git a/tests/bugs/bug-764638.t b/tests/bugs/bug-764638.t deleted file mode 100644 index 81654652400..00000000000 --- a/tests/bugs/bug-764638.t +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -. 
$(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI pool list; -TEST $CLI pool list --xml; - -cleanup; diff --git a/tests/bugs/bug-765230.t b/tests/bugs/bug-765230.t deleted file mode 100755 index 2012be5ad07..00000000000 --- a/tests/bugs/bug-765230.t +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting quota-timeout as 20 -TEST ! $CLI volume set $V0 features.quota-timeout 20 -EXPECT '' volinfo_field $V0 'features.quota-timeout'; - -## Enabling features.quota-deem-statfs -TEST ! $CLI volume set $V0 features.quota-deem-statfs on -EXPECT '' volinfo_field $V0 'features.quota-deem-statfs' - -## Enabling quota -TEST $CLI volume quota $V0 enable -EXPECT 'on' volinfo_field $V0 'features.quota' - -## Setting quota-timeout as 20 -TEST $CLI volume set $V0 features.quota-timeout 20 -EXPECT '20' volinfo_field $V0 'features.quota-timeout'; - -## Enabling features.quota-deem-statfs -TEST $CLI volume set $V0 features.quota-deem-statfs on -EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs' - -## Disabling quota -TEST $CLI volume quota $V0 disable -EXPECT 'off' volinfo_field $V0 'features.quota' - -## Setting quota-timeout as 30 -TEST ! $CLI volume set $V0 features.quota-timeout 30 -EXPECT '20' volinfo_field $V0 'features.quota-timeout'; - -## Disabling features.quota-deem-statfs -TEST ! $CLI volume set $V0 features.quota-deem-statfs off -EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs' - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-765380.t b/tests/bugs/bug-765380.t deleted file mode 100644 index cf580c4a811..00000000000 --- a/tests/bugs/bug-765380.t +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -REPLICA=2 - -TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}00 $H0:$B0/${V0}01 $H0:$B0/${V0}10 $H0:$B0/${V0}11 -TEST $CLI volume start $V0 - -## Mount FUSE with caching disabled -TEST $GFS -s $H0 --volfile-id $V0 $M0; - -function count_hostname_or_uuid_from_pathinfo() -{ - pathinfo=$(getfattr -n trusted.glusterfs.pathinfo $M0/f00f) - echo $pathinfo | grep -o $1 | wc -l -} - -TEST touch $M0/f00f - -EXPECT $REPLICA count_hostname_or_uuid_from_pathinfo $H0 - -# turn on node-uuid-pathinfo option -TEST $CLI volume set $V0 node-uuid-pathinfo on - -# do not expext hostname as part of the pathinfo string -EXPECT 0 count_hostname_or_uuid_from_pathinfo $H0 - -uuid=$(grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=) - -# ... but expect the uuid $REPLICA times -EXPECT $REPLICA count_hostname_or_uuid_from_pathinfo $uuid - -cleanup; diff --git a/tests/bugs/bug-765473.t b/tests/bugs/bug-765473.t deleted file mode 100755 index cf2588256f4..00000000000 --- a/tests/bugs/bug-765473.t +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../fileio.rc - -cleanup; - -function clients_connected() -{ - volname=$1 - gluster volume status $volname clients | grep -i 'Clients connected' | sed -e 's/[^0-9]*\(.*\)/\1/g' -} - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 $H0:$B0/${V0}1 -TEST $CLI volume start $V0; - -TEST glusterfs --direct-io-mode=yes --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; - -TEST fd=`fd_available` -TEST fd_open $fd 'w' "$M0/testfile" -TEST fd_write $fd "content" -TEST $CLI volume stop $V0 -# write some content which will result in marking fd bad -fd_write $fd "more content" -sync $V0 -TEST $CLI volume start $V0 -EXPECT 'Started' volinfo_field $V0 'Status'; -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 clients_connected $V0 -TEST ! fd_write $fd "still more content" - -cleanup diff --git a/tests/bugs/bug-765564.t b/tests/bugs/bug-765564.t deleted file mode 100644 index fa8ead7f4f2..00000000000 --- a/tests/bugs/bug-765564.t +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -## Start and create a volume -mkdir -p ${B0}/${V0}-0 -mkdir -p ${B0}/${V0}-1 -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} - -TEST $CLI volume set $V0 performance.io-cache off; -TEST $CLI volume set $V0 performance.write-behind off; -TEST $CLI volume set $V0 performance.stat-prefetch off - -TEST $CLI volume start $V0; - -## Mount native -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 - -#returns success if 'olddir' is absent -#'olddir' must be absent in both replicas -function rm_succeeded () { - local dir1=$1 - [[ -d $H0:$B0/${V0}-0/$dir1 || -d $H0:$B0/${V0}-1/$dir1 ]] && return 0 - return 1 -} - -# returns successes if 'newdir' is present -#'newdir' must be present in both replicas -function mv_succeeded () { - local dir1=$1 - [[ -d $H0:$B0/${V0}-0/$dir1 && -d $H0:$B0/${V0}-1/$dir1 ]] && return 1 - return 0 -} - -# returns zero on success -# Only one of rm and mv can succeed. This is captured by the XOR below - -function chk_backend_consistency(){ - local dir1=$1 - local dir2=$2 - local rm_status=rm_succeeded $dir1 - local mv_status=mv_succeeded $dir2 - [[ ( $rm_status && ! $mv_status ) || ( ! $rm_status && $mv_status ) ]] && return 0 - return 1 -} - -#concurrent removal/rename of dirs -function rm_mv_correctness () { - ret=0 - for i in {1..100}; do - mkdir $M0/"dir"$i - rmdir $M0/"dir"$i & - mv $M0/"dir"$i $M0/"adir"$i & - wait - tmp_ret=$(chk_backend_consistency "dir"$i "adir"$i) - (( ret += tmp_ret )) - rm -rf $M0/"dir"$i - rm -rf $M0/"adir"$i - done - return $ret -} - -TEST touch $M0/a; -TEST mv $M0/a $M0/b; - -#test rename fop when one of the bricks is down -kill_brick ${V0} ${H0} ${B0}/${V0}-1; -TEST touch $M0/h; -TEST mv $M0/h $M0/1; - -TEST $CLI volume start $V0 force; - -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1; -find $M0 2>/dev/null 1>/dev/null; -find $M0 | xargs stat 2>/dev/null 1>/dev/null; - -TEST rm_mv_correctness; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -cleanup; - diff --git a/tests/bugs/bug-767095.t b/tests/bugs/bug-767095.t deleted file mode 100755 index 82212c72d6e..00000000000 --- a/tests/bugs/bug-767095.t +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -. 
$(dirname $0)/../include.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -function volinfo_field() -{ - local vol=$1; - local field=$2; - - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; -} - -dump_dir='/tmp/gerrit_glusterfs' -TEST mkdir -p $dump_dir; -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -TEST $CLI volume set $V0 error-gen posix; -TEST $CLI volume set $V0 server.statedump-path $dump_dir; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST PID=`gluster --xml volume status patchy | grep -A 5 patchy1 | grep '' | cut -d '>' -f 2 | cut -d '<' -f 1` -TEST kill -USR1 $PID; -sleep 2; -for file_name in $(ls $dump_dir) -do - TEST grep "error-gen.priv" $dump_dir/$file_name; -done - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -TEST rm -rf $dump_dir; - -cleanup; diff --git a/tests/bugs/bug-767585-gfid.t b/tests/bugs/bug-767585-gfid.t deleted file mode 100755 index 41043a0b247..00000000000 --- a/tests/bugs/bug-767585-gfid.t +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#Test cases to perform gfid-self-heal -#file 'a' should be assigned a fresh gfid -#file 'b' should be healed with gfid1 from brick1 -#file 'c' should be healed with gfid2 from brick2 - -gfid1="0x8428b7193a764bf8be8046fb860b8993" -gfid2="0x85ad91afa2f74694bf52c3326d048209" - -cleanup; -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable -touch $B0/${V0}0/a $B0/${V0}1/a -touch $B0/${V0}0/b $B0/${V0}1/b -touch $B0/${V0}0/c $B0/${V0}1/c - -TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/b -TEST setfattr -n trusted.gfid -v $gfid2 $B0/${V0}1/c - -sleep 2 - -TEST stat $M0/a -TEST stat $M0/b -TEST stat $M0/c - -TEST gf_get_gfid_xattr $B0/${V0}0/a -TEST gf_get_gfid_xattr $B0/${V0}1/a - -EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}0/b -EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}1/b - -EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}0/c -EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}1/c - -cleanup; diff --git a/tests/bugs/bug-770655.t b/tests/bugs/bug-770655.t deleted file mode 100755 index 945e323bbc8..00000000000 --- a/tests/bugs/bug-770655.t +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start and create a distribute-replicate volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT 'Distributed-Replicate' volinfo_field $V0 'Type'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting stripe-block-size as 10MB -TEST ! $CLI volume set $V0 stripe-block-size 10MB -EXPECT '' volinfo_field $V0 'cluster.stripe-block-size'; - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! 
$CLI volume info $V0; - -cleanup; - -## Start and create a replicate volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 replica 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT 'Replicate' volinfo_field $V0 'Type'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting stripe-block-size as 10MB -TEST ! $CLI volume set $V0 stripe-block-size 10MB -EXPECT '' volinfo_field $V0 'cluster.stripe-block-size'; - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; - -## Start and create a distribute volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT 'Distribute' volinfo_field $V0 'Type'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting stripe-block-size as 10MB -TEST ! $CLI volume set $V0 stripe-block-size 10MB -EXPECT '' volinfo_field $V0 'cluster.stripe-block-size'; - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; - -## Start and create a stripe volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 stripe 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT 'Stripe' volinfo_field $V0 'Type'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting stripe-block-size as 10MB -TEST $CLI volume set $V0 stripe-block-size 10MB -EXPECT '10MB' volinfo_field $V0 'cluster.stripe-block-size'; - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; - -## Start and create a distributed stripe volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 stripe 4 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT 'Distributed-Stripe' volinfo_field $V0 'Type'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting stripe-block-size as 10MB -TEST $CLI volume set $V0 stripe-block-size 10MB -EXPECT '10MB' volinfo_field $V0 'cluster.stripe-block-size'; - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! 
$CLI volume info $V0; - -cleanup; - -## Start and create a distributed stripe replicate volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 stripe 2 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT 'Distributed-Striped-Replicate' volinfo_field $V0 'Type'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting stripe-block-size as 10MB -TEST $CLI volume set $V0 stripe-block-size 10MB -EXPECT '10MB' volinfo_field $V0 'cluster.stripe-block-size'; - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-782095.t b/tests/bugs/bug-782095.t deleted file mode 100755 index a0cea14ee86..00000000000 --- a/tests/bugs/bug-782095.t +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting performance cache min size as 2MB -TEST $CLI volume set $V0 performance.cache-min-file-size 2MB -EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size'; - -## Setting performance cache max size as 20MB -TEST $CLI volume set $V0 performance.cache-max-file-size 20MB -EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size'; - -## Trying to set performance cache min size as 25MB -TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB -EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size'; - -## Able to set performance cache min size as long as its lesser than max size -TEST $CLI volume set $V0 performance.cache-min-file-size 15MB -EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size'; - -## Trying it out with only cache-max-file-size in CLI as 10MB -TEST ! $CLI volume set $V0 cache-max-file-size 10MB -EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size'; - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-797171.t b/tests/bugs/bug-797171.t deleted file mode 100755 index d29c4bd7a2a..00000000000 --- a/tests/bugs/bug-797171.t +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/brick1; -TEST $CLI volume set $V0 debug.trace marker; -TEST $CLI volume set $V0 debug.log-history on - -TEST $CLI volume start $V0; - -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \ -$M0; - -touch $M0/{1..22}; -rm -f $M0/*; - -pid_file=$(ls $GLUSTERD_WORKDIR/vols/$V0/run); -brick_pid=$(cat $GLUSTERD_WORKDIR/vols/$V0/run/$pid_file); - -mkdir $statedumpdir/statedump_tmp/; -echo "path=$statedumpdir/statedump_tmp" > $statedumpdir/glusterdump.options; -echo "all=yes" >> $statedumpdir/glusterdump.options; - -TEST $CLI volume statedump $V0 history; - -file_name=$(ls $statedumpdir/statedump_tmp); -TEST grep "xlator.debug.trace.history" $statedumpdir/statedump_tmp/$file_name; - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -rm -rf $statedumpdir/statedump_tmp; -rm -f $statedumpdir/glusterdump.options; - -cleanup; diff --git a/tests/bugs/bug-802417.t b/tests/bugs/bug-802417.t deleted file mode 100755 index d8f9ca26148..00000000000 --- a/tests/bugs/bug-802417.t +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -function write_file() -{ - path="$1"; shift - echo "$*" > "$path" -} - -cleanup; -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -## Start and create a volume -mkdir -p ${B0}/${V0}-0 -mkdir -p ${B0}/${V0}-1 -mkdir -p ${B0}/${V0}-2 -TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2} - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Make sure io-cache and write-behind don't interfere. -TEST $CLI volume set $V0 performance.io-cache off; -TEST $CLI volume set $V0 performance.write-behind off; -TEST $CLI volume set $V0 performance.stat-prefetch off - -## Make sure automatic self-heal doesn't perturb our results. -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume set $V0 cluster.data-self-heal on -TEST $CLI volume set $V0 cluster.background-self-heal-count 0 - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Mount native -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 - -## Create a file with some recognizably stale data. -TEST write_file $M0/a_file "old_data" - -## Kill two of the bricks and write some newer data. -TEST kill_brick ${V0} ${H0} ${B0}/${V0}-1 -TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2 -TEST write_file $M0/a_file "new_data" - -## Bring all the bricks up and kill one so we do a partial self-heal. 
-TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2 -TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2 -TEST dd if=${M0}/a_file of=/dev/null - - -obs_path_0=${B0}/${V0}-0/a_file -obs_path_1=${B0}/${V0}-1/a_file -obs_path_2=${B0}/${V0}-2/a_file - -tgt_xattr_0="trusted.afr.${V0}-client-0" -tgt_xattr_1="trusted.afr.${V0}-client-1" -tgt_xattr_2="trusted.afr.${V0}-client-2" - -actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_0) -EXPECT "0x000000000000000000000000|^\$" echo $actual - -actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_1) -EXPECT "0x000000000000000000000000|^\$" echo $actual - -actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_2) -EXPECT "0x000000030000000000000000" echo $actual - -actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_0) -EXPECT "0x000000000000000000000000|^\$" echo $actual - -actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_1) -EXPECT "0x000000000000000000000000|^\$" echo $actual - -actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_2) -EXPECT "0x000000010000000000000000" echo $actual - -actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_0) -EXPECT "0x000000000000000000000000|^\$" echo $actual - -actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_1) -EXPECT "0x000000000000000000000000|^\$" echo $actual - -actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_2) -EXPECT "0x000000000000000000000000|^\$" echo $actual - -if [ "$EXIT_EARLY" = "1" ]; then - exit 0; -fi - -## Finish up -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-808400-dist.t b/tests/bugs/bug-808400-dist.t deleted file mode 100755 index d201b0424f5..00000000000 --- a/tests/bugs/bug-808400-dist.t +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
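Note on the deleted bug-802417.t: it verifies AFR pending-changelog accounting by reading the trusted.afr.<volume>-client-N xattrs from each brick copy through the framework helper afr_get_changelog_xattr and matching them against an all-zero value or an empty string. Outside the framework the same inspection can be done with plain getfattr; a hedged sketch follows, where the brick path, volume name, and file name are placeholders.

#!/bin/bash
# Inspect AFR pending-changelog xattrs on one brick's copy of a file.
# BRICK, VOL and FILE are placeholders for a real brick path, volume and file.
BRICK=/bricks/r3-0
VOL=patchy
FILE=a_file

for client in 0 1 2; do
    # -e hex prints the raw pending counters in hex; a missing xattr or an
    # all-zero value means nothing is pending against that subvolume.
    val=$(getfattr --absolute-names -n trusted.afr.$VOL-client-$client \
          -e hex "$BRICK/$FILE" 2>/dev/null | awk -F= '/trusted.afr/ {print $2}')
    echo "client-$client: ${val:-<not set>}"
done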
$(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -MOUNTDIR=$M0; -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; - -build_tester $(dirname $0)/bug-808400-flock.c -build_tester $(dirname $0)/bug-808400-fcntl.c - -TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\' -TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\' - -TEST rm -rf $MOUNTDIR/* -TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR - -cleanup; diff --git a/tests/bugs/bug-808400-fcntl.c b/tests/bugs/bug-808400-fcntl.c deleted file mode 100644 index 87a83f317b8..00000000000 --- a/tests/bugs/bug-808400-fcntl.c +++ /dev/null @@ -1,117 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef linux -#define fstat64(fd, st) fstat(fd, st) -#endif - -int -run_child (char *filename) -{ - int fd = -1, ret = -1; - struct flock lock = {0, }; - int ppid = 0; - - fd = open (filename, O_RDWR); - if (fd < 0) { - fprintf (stderr, "open failed (%s)\n", strerror (errno)); - goto out; - } - - ppid = getppid (); - - lock.l_type = F_WRLCK; - lock.l_whence = SEEK_SET; - lock.l_start = 0; - lock.l_len = 0; - - ret = fcntl (fd, F_GETLK, &lock); - if (ret < 0) { - fprintf (stderr, "GETLK failed (%s)\n", strerror (errno)); - goto out; - } - - if ((lock.l_type == F_UNLCK) || - (ppid != lock.l_pid)) { - fprintf (stderr, "no locks present, though parent has held " - "one\n"); - ret = -1; - goto out; - } - - ret = 0; -out: - return ret; -} - -int -main (int argc, char *argv[]) -{ - int fd = -1, ret = -1, status = 0; - char *filename = NULL, *cmd = NULL; - struct stat stbuf = {0, }; - struct flock lock = {0, }; - - if (argc != 3) { - fprintf (stderr, "Usage: %s " - "\n", argv[0]); - goto out; - } - - filename = argv[1]; - cmd = argv[2]; - - fd = open (filename, O_RDWR | O_CREAT, 0); - if (fd < 0) { - fprintf (stderr, "open (%s) failed (%s)\n", filename, - strerror (errno)); - goto out; - } - - lock.l_type = F_WRLCK; - lock.l_whence = SEEK_SET; - lock.l_start = 0; - lock.l_len = 0; - - ret = fcntl (fd, F_SETLK, &lock); - if (ret < 0) { - fprintf (stderr, "fcntl failed (%s)\n", strerror (errno)); - goto out; - } - - system (cmd); - - /* wait till graph switch completes */ - ret = fstat64 (fd, &stbuf); - if (ret < 0) { - fprintf (stderr, "fstat64 failure (%s)\n", strerror (errno)); - goto out; - } - - sleep (10); - - /* By now old-graph would be disconnected and locks should be cleaned - * up if they are not migrated. Check that by trying to acquire a lock - * on a new fd opened by another process on same file. 
- */ - ret = fork (); - if (ret == 0) { - ret = run_child (filename); - } else { - wait (&status); - if (WIFEXITED(status)) { - ret = WEXITSTATUS(status); - } else { - ret = 0; - } - } - -out: - return ret; -} diff --git a/tests/bugs/bug-808400-flock.c b/tests/bugs/bug-808400-flock.c deleted file mode 100644 index bd2ce8cfb01..00000000000 --- a/tests/bugs/bug-808400-flock.c +++ /dev/null @@ -1,96 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef linux -#define fstat64(fd, st) fstat(fd, st) -#endif - -int -run_child (char *filename) -{ - int fd = -1, ret = -1; - - fd = open (filename, O_RDWR); - if (fd < 0) { - fprintf (stderr, "open failed (%s)\n", strerror (errno)); - goto out; - } - - ret = flock (fd, LOCK_EX | LOCK_NB); - if ((ret == 0) || (errno != EWOULDBLOCK)) { - fprintf (stderr, "no locks present, though parent has held " - "one\n"); - ret = -1; - goto out; - } - - ret = 0; -out: - return ret; -} - -int -main (int argc, char *argv[]) -{ - int fd = -1, ret = -1, status = 0; - char *filename = NULL, *cmd = NULL; - struct stat stbuf = {0, }; - - if (argc != 3) { - fprintf (stderr, "Usage: %s " - "\n", argv[0]); - goto out; - } - - filename = argv[1]; - cmd = argv[2]; - - fd = open (filename, O_RDWR | O_CREAT, 0); - if (fd < 0) { - fprintf (stderr, "open (%s) failed (%s)\n", filename, - strerror (errno)); - goto out; - } - - ret = flock (fd, LOCK_EX); - if (ret < 0) { - fprintf (stderr, "flock failed (%s)\n", strerror (errno)); - goto out; - } - - system (cmd); - - /* wait till graph switch completes */ - ret = fstat64 (fd, &stbuf); - if (ret < 0) { - fprintf (stderr, "fstat64 failure (%s)\n", strerror (errno)); - goto out; - } - - sleep (10); - - /* By now old-graph would be disconnected and locks should be cleaned - * up if they are not migrated. Check that by trying to acquire a lock - * on a new fd opened by another process on same file - */ - ret = fork (); - if (ret == 0) { - ret = run_child (filename); - } else { - wait (&status); - if (WIFEXITED(status)) { - ret = WEXITSTATUS(status); - } else { - ret = 0; - } - } - -out: - return ret; -} diff --git a/tests/bugs/bug-808400-repl.t b/tests/bugs/bug-808400-repl.t deleted file mode 100755 index d1e1c4977c4..00000000000 --- a/tests/bugs/bug-808400-repl.t +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; -TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -MOUNTDIR=$M0; -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; - -build_tester $(dirname $0)/bug-808400-flock.c -build_tester $(dirname $0)/bug-808400-fcntl.c - -TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\' -TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\' - -TEST rm -rf $MOUNTDIR/* -TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR - -cleanup; diff --git a/tests/bugs/bug-808400-stripe.t b/tests/bugs/bug-808400-stripe.t deleted file mode 100755 index fce6b8a9348..00000000000 --- a/tests/bugs/bug-808400-stripe.t +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -. 
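Note on the deleted bug-808400 helpers: each one holds a POSIX or BSD lock on an open fd, triggers a graph switch via gluster volume set, and then has a child process confirm the lock survived by attempting a conflicting non-blocking acquisition. The same "is the lock still held?" probe can be sketched with the flock(1) utility alone; the file path below is a placeholder.

#!/bin/bash
# Sketch: verify that a lock held by one process is visible to another.
# TESTFILE is a placeholder path on a mounted volume.
TESTFILE=/mnt/glusterfs/testfile

touch "$TESTFILE"

# Hold an exclusive flock for 20 seconds in a background process.
flock -x "$TESTFILE" sleep 20 &
holder=$!
sleep 1    # give the holder time to acquire the lock

# A non-blocking attempt from a second process must fail while the lock is
# held; if it succeeds, the lock was lost somewhere along the way.
if flock -xn "$TESTFILE" true; then
    echo "ERROR: lock was not held"
else
    echo "OK: conflicting lock attempt was refused"
fi

kill "$holder" 2>/dev/null
wait 2>/dev/null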
$(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 stripe 2 $H0:$B0/brick1 $H0:$B0/brick2; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -MOUNTDIR=$M0; -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; - -build_tester $(dirname $0)/bug-808400-flock.c -build_tester $(dirname $0)/bug-808400-fcntl.c - -TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\' -TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\' - -TEST rm -rf $MOUNTDIR/* -TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR - -cleanup; diff --git a/tests/bugs/bug-808400.t b/tests/bugs/bug-808400.t deleted file mode 100755 index 55881b92fa9..00000000000 --- a/tests/bugs/bug-808400.t +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/brick1; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -#mount on a random dir -TEST MOUNTDIR="/tmp/$RANDOM" -TEST mkdir $MOUNTDIR -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; - -build_tester $(dirname $0)/bug-808400-flock.c -build_tester $(dirname $0)/bug-808400-fcntl.c - -TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\' -TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\' - -TEST rm -rf $MOUNTDIR/* -TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR -TEST rm -rf $MOUNTDIR - -cleanup; diff --git a/tests/bugs/bug-811493.t b/tests/bugs/bug-811493.t deleted file mode 100755 index 966995945cb..00000000000 --- a/tests/bugs/bug-811493.t +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI system uuid reset; - -uuid1=$(grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f 2 -d "="); - -TEST $CLI system uuid reset; -uuid2=$(grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f 2 -d "="); - -TEST [ $uuid1 != $uuid2 ] - -cleanup diff --git a/tests/bugs/bug-821056.t b/tests/bugs/bug-821056.t deleted file mode 100644 index 8c002601066..00000000000 --- a/tests/bugs/bug-821056.t +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume set $V0 eager-lock off -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume set $V0 performance.quick-read off -TEST $CLI volume set $V0 performance.open-behind off -TEST $CLI volume set $V0 performance.io-cache off -TEST $CLI volume set $V0 performance.write-behind on -TEST $CLI volume set $V0 performance.stat-prefetch off -TEST $CLI volume set $V0 performance.read-ahead off -TEST $CLI volume set $V0 cluster.background-self-heal-count 0 -TEST $CLI volume start $V0 -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable -touch $M0/a - -#Open file with fd as 5 -exec 5>$M0/a -realpath=$(gf_get_gfid_backend_file_path $B0/${V0}0 "a") - -kill_brick $V0 $H0 $B0/${V0}0 -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 - -EXPECT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath" - -kill_brick $V0 $H0 $B0/${V0}0 -TEST gf_rm_file_and_gfid_link $B0/${V0}0 "a" -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -ls -l $M0/a 2>&1 > /dev/null #Make sure the file is re-created -EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath" -EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/a - -for i in {1..1024}; do - echo "open sesame" >&5 -done - -EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath" -#close the fd -exec 5>&- - -#Check that anon-fd based file is not leaking. -EXPECT_WITHIN $REOPEN_TIMEOUT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath" -cleanup; diff --git a/tests/bugs/bug-822830.t b/tests/bugs/bug-822830.t deleted file mode 100755 index b7a5704cdba..00000000000 --- a/tests/bugs/bug-822830.t +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting nfs.rpc-auth-reject as 192.{}.1.2 -TEST ! $CLI volume set $V0 nfs.rpc-auth-reject 192.{}.1.2 -EXPECT '' volinfo_field $V0 'nfs.rpc-auth-reject'; - -# Setting nfs.rpc-auth-allow as a.a. -TEST ! $CLI volume set $V0 nfs.rpc-auth-allow a.a. -EXPECT '' volinfo_field $V0 'nfs.rpc-auth-allow'; - -## Setting nfs.rpc-auth-reject as 192.*..* -TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.*..* -EXPECT '192.*..*' volinfo_field $V0 'nfs.rpc-auth-reject'; - -# Setting nfs.rpc-auth-allow as a.a -TEST $CLI volume set $V0 nfs.rpc-auth-allow a.a -EXPECT 'a.a' volinfo_field $V0 'nfs.rpc-auth-allow'; - -# Setting nfs.rpc-auth-allow as *.redhat.com -TEST $CLI volume set $V0 nfs.rpc-auth-allow *.redhat.com -EXPECT '\*.redhat.com' volinfo_field $V0 'nfs.rpc-auth-allow'; - -# Setting nfs.rpc-auth-allow as 192.168.10.[1-5] -TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.10.[1-5] -EXPECT '192.168.10.\[1-5]' volinfo_field $V0 'nfs.rpc-auth-allow'; - -# Setting nfs.rpc-auth-allow as 192.168.70.? -TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.70.? -EXPECT '192.168.70.?' 
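Note on the deleted bug-821056.t: it repeatedly asks, via gf_check_file_opened_in_brick, whether the brick process has a particular backend file open, to prove that anonymous fds are reopened and later released. One out-of-framework way to make the same kind of check is to walk the brick process's /proc fd table; the PID and path below are placeholders, and this is only assumed to approximate what the helper reports.

#!/bin/bash
# Sketch: does a brick process currently hold a given backend file open?
# BRICK_PID and REALPATH are placeholders for a real brick PID and file path.
BRICK_PID=12345
REALPATH=/bricks/r2-0/a

found=N
for fd in /proc/$BRICK_PID/fd/*; do
    if [ "$(readlink "$fd")" = "$REALPATH" ]; then
        found=Y
        break
    fi
done
echo $found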
volinfo_field $V0 'nfs.rpc-auth-allow'; - -# Setting nfs.rpc-auth-reject as 192.168.10.5/16 -TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.10.5/16 -EXPECT '192.168.10.5/16' volinfo_field $V0 'nfs.rpc-auth-reject'; - -## Setting nfs.rpc-auth-reject as 192.*.* -TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.*.* -EXPECT '192.*.*' volinfo_field $V0 'nfs.rpc-auth-reject'; - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-823081.t b/tests/bugs/bug-823081.t deleted file mode 100755 index e8630521b64..00000000000 --- a/tests/bugs/bug-823081.t +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; -cmd_log_history="cmd_history.log" -V1=patchy2 - -TEST glusterd -TEST pidof glusterd - -logdir=`gluster --print-logdir` -function set_tail () -{ - vol=$1; - tail_success="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 : SUCCESS" - tail_failure="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 : FAILED : Volume $vol already exists" - tail_success_force="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 force : SUCCESS" - tail_failure_force="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 force : FAILED : Volume $vol already exists" -} - -set_tail $V0; - -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; -tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-` -TEST [[ \"$tail\" == \"$tail_success\" ]] - -TEST ! $CLI volume create $V0 $H0:$B0/${V0}{1,2}; -tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-` -TEST [[ \"$tail\" == \"$tail_failure\" ]] - -set_tail $V1; -TEST gluster volume create $V1 $H0:$B0/${V1}{1,2} force; -tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-` -TEST [[ \"$tail\" == \"$tail_success_force\" ]] - -TEST ! gluster volume create $V1 $H0:$B0/${V1}{1,2} force; -tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-` -TEST [[ \"$tail\" == \"$tail_failure_force\" ]] - -cleanup; diff --git a/tests/bugs/bug-824753-file-locker.c b/tests/bugs/bug-824753-file-locker.c deleted file mode 100644 index 903e23e0a1f..00000000000 --- a/tests/bugs/bug-824753-file-locker.c +++ /dev/null @@ -1,42 +0,0 @@ -#include -#include -#include - -int main (int argc, char *argv[]) -{ - int fd = -1; - int ret = -1; - char command[2048] = ""; - char filepath[255] = ""; - struct flock fl; - - fl.l_type = F_WRLCK; - fl.l_whence = SEEK_SET; - fl.l_start = 7; - fl.l_len = 1; - fl.l_pid = getpid(); - - snprintf(filepath, 255, "%s/%s", argv[4], argv[5]); - - fd = open(filepath, O_RDWR); - - if (fd == -1) - return -1; - - if (fcntl(fd, F_SETLKW, &fl) == -1) { - return -1; - } - - snprintf(command, sizeof(command), - "gluster volume clear-locks %s /%s kind all posix 0,7-1 |" - " grep %s | awk -F'..: ' '{print $1}' | grep %s:%s/%s", - argv[1], argv[5], argv[2], argv[2], argv[3], argv[1]); - - ret = system (command); - close(fd); - - if (ret) - return -1; - else - return 0; -} diff --git a/tests/bugs/bug-824753.t b/tests/bugs/bug-824753.t deleted file mode 100755 index 772219a424b..00000000000 --- a/tests/bugs/bug-824753.t +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -. 
$(dirname $0)/../include.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -function volinfo_field() -{ - local vol=$1; - local field=$2; - - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; -} - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST glusterfs -s $H0 --volfile-id=$V0 $M0 -touch $M0/file1; - -TEST $CC -g $(dirname $0)/bug-824753-file-locker.c -o $(dirname $0)/file-locker - -TEST $(dirname $0)/file-locker $V0 $H0 $B0 $M0 file1 - -## Finish up -TEST rm -f $(dirname $0)/file-locker -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-830665.t b/tests/bugs/bug-830665.t deleted file mode 100755 index 2a84f9b6045..00000000000 --- a/tests/bugs/bug-830665.t +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc -. $(dirname $0)/../volume.rc - -cleanup; - -function recreate { - rm -rf $1 && mkdir -p $1 -} - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -## Start and create a volume -recreate ${B0}/${V0}-0 -recreate ${B0}/${V0}-1 -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} - -function volinfo_field() -{ - local vol=$1; - local field=$2; - - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; -} - -#EXPECT_WITHIN fails the test if the command it executes fails. This function -#returns "" when the file doesn't exist -function friendly_cat { - if [ ! -f $1 ]; - then - echo ""; - else - cat $1; - fi -} - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Make sure stat-prefetch doesn't prevent self-heal checks. -TEST $CLI volume set $V0 performance.stat-prefetch off; - -## Make sure automatic self-heal doesn't perturb our results. -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume set $V0 cluster.background-self-heal-count 0 - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -## Mount NFS -TEST mount_nfs $H0:/$V0 $N0 nolock; - -## Create some files and directories -echo "test_data" > $N0/a_file; -mkdir $N0/a_dir; -echo "more_test_data" > $N0/a_dir/another_file; - -## Unmount and stop the volume. -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 -TEST $CLI volume stop $V0; - -# Recreate the brick. Note that because of http://review.gluster.org/#change,4202 -# we need to preserve and restore the volume ID or else the brick (and thus the -# entire not-very-HA-any-more volume) won't start. When that bug is fixed, we can -# remove the [gs]etxattr calls. -volid=$(getfattr -e hex -n trusted.glusterfs.volume-id $B0/${V0}-0 2> /dev/null \ - | grep = | cut -d= -f2) -rm -rf $B0/${V0}-0; -mkdir $B0/${V0}-0; -setfattr -n trusted.glusterfs.volume-id -v $volid $B0/${V0}-0 - -## Restart and remount. Note that we use actimeo=0 so that the stat calls -## we need for self-heal don't get blocked by the NFS client. 
-TEST $CLI volume start $V0; -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 nolock,actimeo=0; - -## The Linux NFS client has a really charming habit of caching stuff right -## after mount, even though we set actimeo=0 above. Life would be much easier -## if NFS developers cared as much about correctness as they do about shaving -## a few seconds off of benchmarks. -ls -l $N0 &> /dev/null; -sleep 5; - -## Force entry self-heal. -TEST $CLI volume set $V0 cluster.self-heal-daemon on -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -TEST gluster volume heal $V0 full -#ls -lR $N0 > /dev/null; - -## Do NOT check through the NFS mount here. That will force a new self-heal -## check, but we want to test whether self-heal already happened. - -## Make sure everything's in order on the recreated brick. -EXPECT_WITHIN $HEAL_TIMEOUT 'test_data' friendly_cat $B0/${V0}-0/a_file; -EXPECT_WITHIN $HEAL_TIMEOUT 'more_test_data' friendly_cat $B0/${V0}-0/a_dir/another_file; - -if [ "$EXIT_EARLY" = "1" ]; then - exit 0; -fi - -## Finish up -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-834465.c b/tests/bugs/bug-834465.c deleted file mode 100644 index 61d3deac077..00000000000 --- a/tests/bugs/bug-834465.c +++ /dev/null @@ -1,61 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -int -main (int argc, char *argv[]) -{ - int fd = -1; - char *filename = NULL; - struct flock lock = {0, }; - int i = 0; - int ret = -1; - - if (argc != 2) { - fprintf (stderr, "Usage: %s ", argv[0]); - goto out; - } - - filename = argv[1]; - - fd = open (filename, O_RDWR | O_CREAT, 0); - if (fd < 0) { - fprintf (stderr, "open (%s) failed (%s)\n", filename, - strerror (errno)); - goto out; - } - - lock.l_type = F_WRLCK; - lock.l_whence = SEEK_SET; - lock.l_start = 1; - lock.l_len = 1; - - while (i < 100) { - lock.l_type = F_WRLCK; - ret = fcntl (fd, F_SETLK, &lock); - if (ret < 0) { - fprintf (stderr, "fcntl setlk failed (%s)\n", - strerror (errno)); - goto out; - } - - lock.l_type = F_UNLCK; - ret = fcntl (fd, F_SETLK, &lock); - if (ret < 0) { - fprintf (stderr, "fcntl setlk failed (%s)\n", - strerror (errno)); - goto out; - } - - i++; - } - - ret = 0; - -out: - return ret; -} diff --git a/tests/bugs/bug-834465.t b/tests/bugs/bug-834465.t deleted file mode 100755 index 70027911498..00000000000 --- a/tests/bugs/bug-834465.t +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -MOUNTDIR=$M0; -TEST glusterfs --mem-accounting --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; - -sdump1=$(generate_mount_statedump $V0); -nalloc1=0 -grep -A3 "fuse - usage-type gf_common_mt_fd_lk_ctx_node_t" $sdump1 -if [ $? 
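Note on the deleted bug-830665.t: its comment explains that when a brick directory is wiped and recreated, the trusted.glusterfs.volume-id xattr must be saved and restored or the brick will refuse to start. A condensed sketch of that save/restore step follows; the brick path is a placeholder.

#!/bin/bash
# Preserve the volume-id xattr across a brick wipe, as the deleted test does.
# BRICK is a placeholder for the real brick directory.
BRICK=/bricks/r2-0

volid=$(getfattr --absolute-names -e hex -n trusted.glusterfs.volume-id "$BRICK" \
        2>/dev/null | awk -F= '/volume-id/ {print $2}')

rm -rf "$BRICK"
mkdir -p "$BRICK"

# Restore the id so glusterd will accept and start the recreated brick.
setfattr -n trusted.glusterfs.volume-id -v "$volid" "$BRICK"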
-eq '0' ] -then - nalloc1=`grep -A3 "fuse - usage-type gf_common_mt_fd_lk_ctx_node_t" $sdump1 | grep num_allocs | cut -d '=' -f2` -fi - -build_tester $(dirname $0)/bug-834465.c - -TEST $(dirname $0)/bug-834465 $M0/testfile - -sdump2=$(generate_mount_statedump $V0); - -# With _gf_free now setting typestr to NULL when num_allocs become 0, it is -# expected that there wouldn't be any entry for gf_common_mt_fd_lk_ctx_node_t -# in the statedump file now - -nalloc2=`grep -A3 "fuse - usage-type gf_common_mt_fd_lk_ctx_node_t" $sdump2 | wc -l` -TEST [ $nalloc1 -eq $nalloc2 ]; - -TEST rm -rf $MOUNTDIR/* -TEST rm -rf $(dirname $0)/bug-834465 -cleanup_mount_statedump $V0 - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR - -cleanup; diff --git a/tests/bugs/bug-839595.t b/tests/bugs/bug-839595.t deleted file mode 100644 index 979827fa736..00000000000 --- a/tests/bugs/bug-839595.t +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}1 -TEST $CLI volume set $V0 cluster.server-quorum-type server -EXPECT "server" volume_option $V0 cluster.server-quorum-type -TEST $CLI volume set $V0 cluster.server-quorum-type none -EXPECT "none" volume_option $V0 cluster.server-quorum-type -TEST $CLI volume reset $V0 cluster.server-quorum-type -TEST ! $CLI volume set $V0 cluster.server-quorum-type abc -TEST ! $CLI volume set all cluster.server-quorum-type none -TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100 - -TEST ! $CLI volume set all cluster.server-quorum-ratio abc -TEST ! $CLI volume set all cluster.server-quorum-ratio -1 -TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005 -TEST $CLI volume set all cluster.server-quorum-ratio 0 -EXPECT "0" volume_option $V0 cluster.server-quorum-ratio -TEST $CLI volume set all cluster.server-quorum-ratio 100 -EXPECT "100" volume_option $V0 cluster.server-quorum-ratio -TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005 -EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio -TEST $CLI volume set all cluster.server-quorum-ratio 100% -EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio -cleanup; diff --git a/tests/bugs/bug-844688.t b/tests/bugs/bug-844688.t deleted file mode 100755 index 228a3ed2470..00000000000 --- a/tests/bugs/bug-844688.t +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/brick0 -TEST $CLI volume start $V0 -TEST glusterfs -s $H0 --volfile-id $V0 $M0 - -mount_pid=$(get_mount_process_pid $V0); -# enable dumping of call stack creation and frame creation times in statedump -kill -USR2 $mount_pid; - -TEST touch $M0/touchfile; -(dd if=/dev/urandom of=$M0/file bs=5k 2>/dev/null 1>/dev/null)& -back_pid=$!; -statedump_file=$(generate_mount_statedump $V0); -grep "callstack-creation-time" $statedump_file 2>/dev/null 1>/dev/null; -TEST [ $? -eq 0 ]; -grep "frame-creation-time" $statedump_file 2>/dev/null 1>/dev/null; -TEST [ $? -eq 0 ]; - -kill -SIGTERM $back_pid; -wait >/dev/null 2>&1; - -TEST rm -f $M0/touchfile $M0/file; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -rm -f $statedumpdir/glusterdump.$mount_pid.*; -cleanup diff --git a/tests/bugs/bug-845213.t b/tests/bugs/bug-845213.t deleted file mode 100644 index e79b3710902..00000000000 --- a/tests/bugs/bug-845213.t +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -. 
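Note on the deleted bug-834465.t: it decides whether fd lock contexts leak by grepping a mount statedump for the gf_common_mt_fd_lk_ctx_node_t memory type and comparing num_allocs before and after the lock loop. A standalone sketch of that extraction follows; the dump file path is a placeholder for a real glusterdump file.

#!/bin/bash
# Sketch: count outstanding allocations of one memory type in a statedump.
# DUMPFILE is a placeholder for a real glusterdump.<pid>.dump.* file.
DUMPFILE=/var/run/gluster/glusterdump.sample

count=0
if grep -q "gf_common_mt_fd_lk_ctx_node_t" "$DUMPFILE"; then
    count=$(grep -A3 "gf_common_mt_fd_lk_ctx_node_t" "$DUMPFILE" \
            | awk -F= '/num_allocs/ {print $2; exit}')
fi
echo "num_allocs=$count"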
$(dirname $0)/../include.rc - -cleanup; - - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -## Create and start a volume with aio enabled -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; -TEST $CLI volume set $V0 remote-dio enable; -TEST $CLI volume set $V0 network.remote-dio disable; - -cleanup; - diff --git a/tests/bugs/bug-846240.t b/tests/bugs/bug-846240.t deleted file mode 100644 index 14ed52c9897..00000000000 --- a/tests/bugs/bug-846240.t +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../fileio.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -function volinfo_field() -{ - local vol=$1; - local field=$2; - - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; -} - -TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -MOUNTDIR=$M0; -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M1; - -TEST touch $M0/testfile; - -# open the file with the fd as 4 -TEST fd=`fd_available`; -TEST fd_open $fd 'w' "$M0/testfile"; - -# remove the file from the other mount point. If unlink is sent from -# $M0 itself, then the file will be actually opened by open-behind which -# we dont want for this testcase -TEST rm -f $M1/testfile; - -# below command opens the file and writes to the file. -# upon open, open-behind unwinds the open call with success. -# now when write comes, open-behind actually opens the file -# and then sends write on the fd. But before sending open itself, -# the file would have been removed from the mount $M1. open() gets error -# and the write call which is put into a stub (open had to be sent first) -# should unwind with the error received in the open call. -echo "data" >> $M0/testfile 2>/dev/null 1>/dev/null; -TEST [ $? -ne 0 ] - -TEST fd_close $fd; - -TEST rm -rf $MOUNTDIR/* - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR - -cleanup; diff --git a/tests/bugs/bug-847622.t b/tests/bugs/bug-847622.t deleted file mode 100755 index 8ceb01cc4b3..00000000000 --- a/tests/bugs/bug-847622.t +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc -. $(dirname $0)/../volume.rc - -case $OSTYPE in -NetBSD) - echo "Skip test on ACL which are not available on NetBSD" >&2 - SKIP_TESTS - exit 0 - ;; -*) - ;; -esac - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/brick0 -TEST $CLI volume start $V0 - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 nolock -cd $N0 - -# simple getfacl setfacl commands -TEST touch testfile -TEST setfacl -m u:14:r testfile -TEST getfacl testfile - -cd -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 -cleanup - diff --git a/tests/bugs/bug-847624.t b/tests/bugs/bug-847624.t deleted file mode 100755 index 6b35c99b0cd..00000000000 --- a/tests/bugs/bug-847624.t +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc -. 
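Note on the deleted bug-846240.t: its comment describes how open-behind unwinds open() immediately and only performs the real open on the first write, so an unlink done through a second mount must surface as a write error on the first. A plain-shell approximation of that sequence using exec'd fds follows; the two mount paths are placeholders for two mounts of the same volume.

#!/bin/bash
# Reproduce the open-behind scenario from the deleted test with shell fds.
# M0 and M1 are placeholders for two mounts of the same volume.
M0=/mnt/gluster-a
M1=/mnt/gluster-b

touch "$M0/testfile"

# Open for append on the first mount; with open-behind the real open is
# deferred until the first write.
exec 4>>"$M0/testfile"

# Remove the file through the second mount so the deferred open must fail.
rm -f "$M1/testfile"

# The write should now report an error instead of silently succeeding.
if echo data >&4 2>/dev/null; then
    echo "UNEXPECTED: write succeeded"
else
    echo "write failed as expected"
fi

exec 4>&-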
$(dirname $0)/../volume.rc -cleanup - -#1 -TEST glusterd -TEST pidof glusterd -#3 -TEST $CLI volume create $V0 $H0:$B0/$V0 -TEST $CLI volume set $V0 nfs.drc on -TEST $CLI volume start $V0 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 nolock -cd $N0 -#7 -TEST dbench -t 10 10 -TEST rm -rf $N0/* -cd -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 -#10 -TEST $CLI volume set $V0 nfs.drc-size 10000 -cleanup diff --git a/tests/bugs/bug-848251.t b/tests/bugs/bug-848251.t deleted file mode 100644 index 3de41727e76..00000000000 --- a/tests/bugs/bug-848251.t +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/brick1; - -TEST $CLI volume start $V0; - -#enable quota -TEST $CLI volume quota $V0 enable; - -#mount on a random dir -TEST MOUNTDIR="/tmp/$RANDOM" -TEST mkdir $MOUNTDIR -TEST glusterfs -s $H0 --volfile-id=$V0 $MOUNTDIR - -function set_quota(){ - mkdir "$MOUNTDIR/$name" - $CLI volume quota $V0 limit-usage /$name 50KB -} - -function quota_list(){ - $CLI volume quota $V0 list | grep -- /$name | awk '{print $3}' -} - -TEST name=":d1" -#file name containing ':' in the start -TEST set_quota -EXPECT "80%" quota_list - -TEST name=":d1/d:1" -#file name containing ':' in between -TEST set_quota -EXPECT "80%" quota_list - -TEST name=":d1/d:1/d1:" -#file name containing ':' in the end -TEST set_quota -EXPECT "80%" quota_list - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR -TEST rm -rf $MOUNTDIR - -cleanup; diff --git a/tests/bugs/bug-852147.t b/tests/bugs/bug-852147.t deleted file mode 100755 index 0e7923086bd..00000000000 --- a/tests/bugs/bug-852147.t +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; -logdir=`gluster --print-logdir`"/bricks" - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST glusterfs -s $H0 --volfile-id=$V0 $M0 -touch $M0/file1; - -TEST $CLI volume set $V0 performance.cache-max-file-size 20MB -TEST $CLI volume set $V0 performance.cache-min-file-size 10MB - -EXPECT "20MB" volinfo_field $V0 'performance.cache-max-file-size'; -EXPECT "10MB" volinfo_field $V0 'performance.cache-min-file-size'; - -#Performing volume reset and verifying. 
-TEST $CLI volume reset $V0 -EXPECT "" volinfo_field $V0 'performance.cache-max-file-size'; -EXPECT "" volinfo_field $V0 'performance.cache-min-file-size'; - -#Verifying vlolume-profile start, info and stop -EXPECT "Starting volume profile on $V0 has been successful " $CLI volume profile $V0 start - -function vol_prof_info() -{ - $CLI volume profile $V0 info | grep Brick | wc -l -} -EXPECT "8" vol_prof_info - -EXPECT "Stopping volume profile on $V0 has been successful " $CLI volume profile $V0 stop - -function log-file-name() -{ - logfilename=$B0"/"$V0"1.log" - echo ${logfilename:1} | tr / - -} - -function file-size() -{ - ls -lrt $1 | awk '{print $5}' -} - -#Finding the current log file's size -log_file=$logdir"/"`log-file-name` -log_file_size=`file-size $log_file` - -#Removing the old backup log files -ren_file=$log_file".*" -rm -rf $ren_file - -#Initiating log rotate -TEST $CLI volume log rotate $V0 - -#Capturing new log file's size -new_file_size=`file-size $log_file` - -#Verifying the size of the new log file and the creation of the backup log file -TEST ! [ $new_file_size -eq $log_file_size ] -TEST ls -lrt $ren_file - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-853258.t b/tests/bugs/bug-853258.t deleted file mode 100755 index 21e48bd1a8f..00000000000 --- a/tests/bugs/bug-853258.t +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -mkdir -p $H0:$B0/${V0}0 -mkdir -p $H0:$B0/${V0}1 -mkdir -p $H0:$B0/${V0}2 -mkdir -p $H0:$B0/${V0}3 - -# Create and start a volume. -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 -TEST $CLI volume start $V0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; - -# Force assignment of initial ranges. -TEST $CLI volume rebalance $V0 fix-layout start -EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0 - -# Get the original values. -xattrs="" -for i in $(seq 0 2); do - xattrs="$xattrs $(dht_get_layout $B0/${V0}$i)" -done - -# Expand the volume and force assignment of new ranges. -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3 -# Force assignment of initial ranges. -TEST $CLI volume rebalance $V0 fix-layout start -EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0 - -for i in $(seq 0 3); do - xattrs="$xattrs $(dht_get_layout $B0/${V0}$i)" -done - -overlap=$( $PYTHON $(dirname $0)/overlap.py $xattrs) -# 2863311531 = 0xaaaaaaab = 2/3 overlap -TEST [ "$overlap" -ge 2863311531 ] - -cleanup diff --git a/tests/bugs/bug-853680.t b/tests/bugs/bug-853680.t deleted file mode 100755 index bd9ee8d134f..00000000000 --- a/tests/bugs/bug-853680.t +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -# -# Bug 853680 -# -# Test that io-threads least-rate-limit throttling functions as expected. Set -# a limit, perform a few operations with a least-priority mount and verify -# said operations take a minimum amount of time according to the limit. - -. $(dirname $0)/../include.rc -. 
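Note on the deleted bug-853258.t: it collects each brick's directory layout with the dht_get_layout helper and feeds the hex ranges to overlap.py. The layout is assumed here to live in the trusted.glusterfs.dht xattr on the brick's copy of the directory; a sketch of collecting it follows, with placeholder brick paths.

#!/bin/bash
# Collect the DHT layout xattr from every brick copy of the volume root.
# Assumes the layout is stored in trusted.glusterfs.dht; brick paths are
# placeholders.
BRICKS="/bricks/d0 /bricks/d1 /bricks/d2"

for b in $BRICKS; do
    layout=$(getfattr --absolute-names -e hex -n trusted.glusterfs.dht "$b" \
             2>/dev/null | awk -F= '/trusted.glusterfs.dht/ {print $2}')
    echo "$b $layout"
done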
$(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}1 -TEST $CLI volume start $V0 - -#Accept min val -TEST $CLI volume set $V0 performance.least-rate-limit 0 -#Accept some value in between -TEST $CLI volume set $V0 performance.least-rate-limit 1035 -#Accept max val INT_MAX -TEST $CLI volume set $V0 performance.least-rate-limit 2147483647 - -#Reject other values -TEST ! $CLI volume set $V0 performance.least-rate-limit 2147483648 -TEST ! $CLI volume set $V0 performace.least-rate-limit -8 -TEST ! $CLI volume set $V0 performance.least-rate-limit abc -TEST ! $CLI volume set $V0 performance.least-rate-limit 0.0 -TEST ! $CLI volume set $V0 performance.least-rate-limit -10.0 -TEST ! $CLI volume set $V0 performance.least-rate-limit 1% - -# set rate limit to 1 operation/sec -TEST $CLI volume set $V0 performance.least-rate-limit 1 - -# use client-pid=-1 for least priority mount -TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --client-pid=-1 - -# create a few files and verify this takes more than a few seconds -date1=`date +%s` -TEST touch $M0/file{0..2} -date2=`date +%s` - -optime=$(($date2 - $date1)) -TEST [ $optime -ge 3 ] - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 - -cleanup; diff --git a/tests/bugs/bug-853690.t b/tests/bugs/bug-853690.t deleted file mode 100755 index 0f09eea9e40..00000000000 --- a/tests/bugs/bug-853690.t +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash -# -# Bug 853690 - Test that short writes do not lead to corruption. -# -# Mismanagement of short writes in AFR leads to corruption and immediately -# detectable split-brain. Write a file to a replica volume using error-gen -# to cause short writes on one replica. -# -# Short writes are also possible during heal. If ignored, the files are marked -# consistent and silently differ. After reading the file, cause a lookup, wait -# for self-heal and verify that the afr xattrs do not match. -# -######## - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST mkdir -p $B0/test{1,2} - -# Our graph is a two brick replica with 100% frequency of short writes on one -# side of the replica. This guarantees a single write fop leads to an out-of-sync -# situation. -cat > $B0/test.vol <&1 | grep = | cut -f2 -d=` -EXPECT_NOT 0x000000000000000000000000 echo $xa - -TEST rm -f $M0/file -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -rm -f $B0/test.vol -rm -rf $B0/test1 $B0/test2 - -cleanup; - diff --git a/tests/bugs/bug-856455.t b/tests/bugs/bug-856455.t deleted file mode 100644 index 0e79d9fa1c9..00000000000 --- a/tests/bugs/bug-856455.t +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -. 
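Note on the deleted bug-853680.t: it asserts that least-rate-limit throttling is in effect by timing three file creates on a least-priority mount under a 1 op/sec limit and requiring at least three seconds of wall time. The generic elapsed-time assertion can be sketched as below; the target directory is a placeholder.

#!/bin/bash
# Time a batch of operations and require a minimum duration, as the deleted
# least-rate-limit test does. TARGET is a placeholder mount directory.
TARGET=/mnt/gluster-least-prio

start=$(date +%s)
touch "$TARGET"/file0 "$TARGET"/file1 "$TARGET"/file2
end=$(date +%s)

elapsed=$((end - start))
if [ $elapsed -ge 3 ]; then
    echo "throttled as expected (${elapsed}s)"
else
    echo "too fast (${elapsed}s), throttling not in effect"
fi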
$(dirname $0)/../include.rc - -cleanup; - -BRICK_COUNT=3 - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 -TEST $CLI volume start $V0 - -## Mount FUSE with caching disabled -TEST $GFS -s $H0 --volfile-id $V0 $M0; - -function query_pathinfo() -{ - local path=$1; - local retval; - - local pathinfo=$(getfattr -n trusted.glusterfs.pathinfo $path); - retval=$(echo $pathinfo | grep -o 'POSIX' | wc -l); - echo $retval -} - -TEST touch $M0/f00f; -TEST mkdir $M0/f00d; - -# verify pathinfo for a file and directory -EXPECT 1 query_pathinfo $M0/f00f; -EXPECT $BRICK_COUNT query_pathinfo $M0/f00d; - -# Kill a brick process and then query for pathinfo -# for directories pathinfo should list backend patch from available (up) subvolumes - -kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`; - -EXPECT `expr $BRICK_COUNT - 1` query_pathinfo $M0/f00d; - -cleanup; diff --git a/tests/bugs/bug-857330/common.rc b/tests/bugs/bug-857330/common.rc deleted file mode 100644 index e5a7cd79adf..00000000000 --- a/tests/bugs/bug-857330/common.rc +++ /dev/null @@ -1,55 +0,0 @@ -. $(dirname $0)/../../include.rc - -UUID_REGEX='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}' - -TASK_ID="" -COMMAND="" -PATTERN="" - -function check-and-store-task-id() -{ - TASK_ID="" - - local task_id=$($CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX") - - if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then - return 1 - fi - - TASK_ID=$task_id - return 0; -} - -function get-task-id() -{ - $CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX" | tail -n1 - -} - -function check-and-store-task-id-xml() -{ - TASK_ID="" - - local task_id=$($CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX") - - if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then - return 1 - fi - - TASK_ID=$task_id - return 0; -} - -function get-task-id-xml() -{ - $CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX" -} - -function get-task-status() -{ - $CLI $COMMAND | grep -o $PATTERN - if [ ${PIPESTATUS[0]} -ne 0 ]; then - return 1 - fi - return 0 -} diff --git a/tests/bugs/bug-857330/normal.t b/tests/bugs/bug-857330/normal.t deleted file mode 100755 index 9e348c53449..00000000000 --- a/tests/bugs/bug-857330/normal.t +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/common.rc -. 
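Note on the deleted bug-857330/common.rc: it captures operation task IDs by matching CLI output against a UUID regex. A minimal standalone version of that extraction follows; the sample output line is made up and only stands in for real rebalance-start output.

#!/bin/bash
# Pull a task UUID out of CLI output, as the deleted common.rc helpers do.
UUID_REGEX='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'

# Made-up sample standing in for real `gluster volume rebalance ... start` output.
sample="volume rebalance: patchy: success: started. ID: 7b08ef4d-b8c1-45cd-8b53-8a0a9e0b3d6a"

task_id=$(echo "$sample" | grep -o -E "$UUID_REGEX" | tail -n1)
echo "task id: $task_id"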
$(dirname $0)/../../volume.rc -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/${V0}1; -TEST $CLI volume info $V0; -TEST $CLI volume start $V0; - -TEST glusterfs -s $H0 --volfile-id=$V0 $M0; - -TEST $PYTHON $(dirname $0)/../../utils/create-files.py \ - --multi -b 10 -d 10 -n 10 $M0; - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -############### -## Rebalance ## -############### -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2; - -COMMAND="volume rebalance $V0 start" -PATTERN="ID:" -TEST check-and-store-task-id - -COMMAND="volume status $V0" -PATTERN="ID" -EXPECT $TASK_ID get-task-id - -COMMAND="volume rebalance $V0 status" -PATTERN="completed" -EXPECT_WITHIN 300 $PATTERN get-task-status - -################### -## Replace-brick ## -################### -REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}3" - -COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR start" -PATTERN="ID:" -TEST check-and-store-task-id - -COMMAND="volume status $V0" -PATTERN="ID" -EXPECT $TASK_ID get-task-id - -COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status" -PATTERN="complete" -EXPECT_WITHIN 300 $PATTERN get-task-status - -TEST $CLI volume replace-brick $V0 $REP_BRICK_PAIR commit; - -################## -## Remove-brick ## -################## -COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 start" -PATTERN="ID:" -TEST check-and-store-task-id - -COMMAND="volume status $V0" -PATTERN="ID" -EXPECT $TASK_ID get-task-id - -COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status" -PATTERN="completed" -EXPECT_WITHIN 300 $PATTERN get-task-status - -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 commit - -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-857330/xml.t b/tests/bugs/bug-857330/xml.t deleted file mode 100755 index 475c35b8faa..00000000000 --- a/tests/bugs/bug-857330/xml.t +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/common.rc -. 
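Note on the deleted bug-857330/normal.t: it waits up to 300 seconds (EXPECT_WITHIN) for a task status such as "completed". Outside the test framework the same wait is a small polling loop; a sketch with a placeholder volume name follows.

#!/bin/bash
# Poll a status command until its output matches a pattern or a timeout hits,
# similar to what EXPECT_WITHIN does in the test framework.
wait_for_status() {
    local timeout=$1 pattern=$2; shift 2
    local waited=0
    while [ $waited -lt $timeout ]; do
        if "$@" 2>/dev/null | grep -q "$pattern"; then
            return 0
        fi
        sleep 2
        waited=$((waited + 2))
    done
    return 1
}

# "patchy" is a placeholder volume name.
wait_for_status 300 "completed" gluster volume rebalance patchy status \
    && echo "rebalance finished" \
    || echo "timed out waiting for rebalance"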
$(dirname $0)/../../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/${V0}1; -TEST $CLI volume info $V0; -TEST $CLI volume start $V0; - -TEST glusterfs -s $H0 --volfile-id=$V0 $M0; - -TEST $PYTHON $(dirname $0)/../../utils/create-files.py \ - --multi -b 10 -d 10 -n 10 $M0; - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - - -############### -## Rebalance ## -############### -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2; - -COMMAND="volume rebalance $V0 start" -PATTERN="task-id" -TEST check-and-store-task-id-xml - -COMMAND="volume status $V0" -PATTERN="id" -EXPECT $TASK_ID get-task-id-xml - -COMMAND="volume rebalance $V0 status" -PATTERN="task-id" -EXPECT $TASK_ID get-task-id-xml - -## TODO: Add tests for rebalance stop - -COMMAND="volume rebalance $V0 status" -PATTERN="completed" -EXPECT_WITHIN 300 $PATTERN get-task-status - -################### -## Replace-brick ## -################### -REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}3" - -COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR start" -PATTERN="task-id" -TEST check-and-store-task-id-xml - -COMMAND="volume status $V0" -PATTERN="id" -EXPECT $TASK_ID get-task-id-xml - -COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status" -PATTERN="task-id" -EXPECT $TASK_ID get-task-id-xml - -## TODO: Add more tests for replace-brick pause|abort - -COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status" -PATTERN="complete" -EXPECT_WITHIN 300 $PATTERN get-task-status - -COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR commit" -PATTERN="task-id" -EXPECT $TASK_ID get-task-id-xml - -################## -## Remove-brick ## -################## -COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 start" -PATTERN="task-id" -TEST check-and-store-task-id-xml - -COMMAND="volume status $V0" -PATTERN="id" -EXPECT $TASK_ID get-task-id-xml - -COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status" -PATTERN="task-id" -EXPECT $TASK_ID get-task-id-xml - -COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status" -PATTERN="completed" -EXPECT_WITHIN 300 $PATTERN get-task-status - -## TODO: Add tests for remove-brick stop - -COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 commit" -PATTERN="task-id" -EXPECT $TASK_ID get-task-id-xml - -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-858215.t b/tests/bugs/bug-858215.t deleted file mode 100755 index 8034276dd6d..00000000000 --- a/tests/bugs/bug-858215.t +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -function volinfo_field() -{ - local vol=$1; - local field=$2; - - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; -} - - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Mount FUSE with caching disabled -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; - -## Test for checking whether the fops have been saved in the event-history -TEST ! 
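Note on the deleted bug-857330/xml.t: it drives the same operations through --xml output, pipes it to xmllint, and then greps for the task-id. When the element name is known, xmllint can pull it out directly with an XPath query; the sample document below is made up (its nesting is an assumption), only the task-id element name comes from the deleted test.

#!/bin/bash
# Extract a task id from XML-formatted CLI output with an XPath query.
# The sample document is made up; real input comes from `gluster ... --xml`.
sample='<cliOutput><volRebalance><task-id>7b08ef4d-b8c1-45cd-8b53-8a0a9e0b3d6a</task-id></volRebalance></cliOutput>'

echo "$sample" | xmllint --xpath 'string(//task-id)' -
echo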
stat $M0/newfile; -TEST touch $M0/newfile; -TEST stat $M0/newfile; -TEST rm $M0/newfile; - -nfs_pid=$(cat $GLUSTERD_WORKDIR/nfs/run/nfs.pid); -glustershd_pid=$(cat $GLUSTERD_WORKDIR/glustershd/run/glustershd.pid); - -pids=$(pidof glusterfs); -for i in $pids -do - if [ $i -ne $nfs_pid ] && [ $i -ne $glustershd_pid ]; then - mount_pid=$i; - break; - fi -done - -dump_dir='/tmp/gerrit_glusterfs' -cat >$statedumpdir/glusterdump.options < -#include -#include -#include -#include -#include -#include -#include - -#ifndef linux -#define fstat64(fd, st) fstat(fd, st) -#endif - -int -main (int argc, char *argv[]) -{ - char *filename = NULL, *volname = NULL, *cmd = NULL; - char buffer[1024] = {0, }; - int fd = -1; - int ret = -1; - struct stat statbuf = {0, }; - - if (argc != 3) { - fprintf (stderr, "usage: %s \n", argv[0]); - goto out; - } - - filename = argv[1]; - volname = argv[2]; - - fd = open (filename, O_RDWR | O_CREAT, 0); - if (fd < 0) { - fprintf (stderr, "open (%s) failed (%s)\n", filename, - strerror (errno)); - goto out; - } - - ret = write (fd, "test-content", 12); - if (ret < 0) { - fprintf (stderr, "write failed (%s)", strerror (errno)); - goto out; - } - - ret = fsync (fd); - if (ret < 0) { - fprintf (stderr, "fsync failed (%s)", strerror (errno)); - goto out; - } - - ret = fstat64 (fd, &statbuf); - if (ret < 0) { - fprintf (stderr, "fstat64 failed (%s)", strerror (errno)); - goto out; - } - - ret = asprintf (&cmd, "gluster --mode=script volume stop %s force", - volname); - if (ret < 0) { - fprintf (stderr, "cannot construct cli command string (%s)", - strerror (errno)); - goto out; - } - - ret = system (cmd); - if (ret < 0) { - fprintf (stderr, "stopping volume (%s) failed", volname); - goto out; - } - - ret = read (fd, buffer, 1024); - if (ret >= 0) { - fprintf (stderr, "read should've returned error, " - "but is successful\n"); - ret = -1; - goto out; - } - - ret = 0; -out: - return ret; -} diff --git a/tests/bugs/bug-858242.t b/tests/bugs/bug-858242.t deleted file mode 100755 index e93c2d24442..00000000000 --- a/tests/bugs/bug-858242.t +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/brick1; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST $CLI volume set $V0 performance.quick-read off - -#mount on a random dir -TEST glusterfs --entry-timeout=3600 --attribute-timeout=3600 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=yes - -build_tester $(dirname $0)/bug-858242.c - -TEST $(dirname $0)/bug-858242 $M0/testfile $V0 - -TEST rm -rf $(dirname $0)/858242 -cleanup; - diff --git a/tests/bugs/bug-858488-min-free-disk.t b/tests/bugs/bug-858488-min-free-disk.t deleted file mode 100644 index 1933f707e9c..00000000000 --- a/tests/bugs/bug-858488-min-free-disk.t +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
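Note on the deleted bug-858242.c: it keeps an fd open on the mount, runs "gluster --mode=script volume stop <vol> force" from inside the program, and then expects read() on that fd to fail. A rough shell-level approximation using an exec'd fd follows; the volume name and mount path are placeholders, and this skips the fstat-based wait for the graph switch that the C helper performs.

#!/bin/bash
# Approximate the deleted bug-858242 check from the shell: a read on an fd
# whose volume has been stopped must fail. VOL and MNT are placeholders.
VOL=patchy
MNT=/mnt/glusterfs

echo test-content > "$MNT/testfile"
exec 6< "$MNT/testfile"            # keep a read fd open across the stop

gluster --mode=script volume stop "$VOL" force

if read -r line <&6; then
    echo "UNEXPECTED: read succeeded: $line"
else
    echo "read failed as expected"
fi
exec 6<&-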
$(dirname $0)/../volume.rc - -cleanup; - -## Start glusterd -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -## Lets create partitions for bricks -TEST truncate -s 100M $B0/brick1 -TEST truncate -s 200M $B0/brick2 -TEST LO1=`SETUP_LOOP $B0/brick1` -TEST MKFS_LOOP $LO1 -TEST LO2=`SETUP_LOOP $B0/brick2` -TEST MKFS_LOOP $LO2 -TEST mkdir -p $B0/${V0}1 $B0/${V0}2 -TEST MOUNT_LOOP $LO1 $B0/${V0}1 -TEST MOUNT_LOOP $LO2 $B0/${V0}2 - -## Lets create volume -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; -TEST glusterfs -s $H0 --volfile-id=$V0 --acl $M0 -## Real test starts here -## ---------------------------------------------------------------------------- - -MINFREEDISKVALUE=90% - -## Set min free disk to MINFREEDISKVALUE percent -TEST $CLI volume set $V0 cluster.min-free-disk $MINFREEDISKVALUE - -## We need to have file name to brick map based on hash. -## We will use this info in test case 0. -i=1 -CONTINUE=2 -BRICK1FILE=0 -BRICK2FILE=0 -while [[ $CONTINUE -ne 0 ]] -do - dd if=/dev/zero of=$M0/file$i.data bs=1024 count=1024 1>/dev/null 2>&1 - - if [[ -e $B0/${V0}1/file$i.data && $BRICK1FILE = "0" ]] - then - BRICK1FILE=file$i.data - CONTINUE=$(( $CONTINUE - 1 )) - fi - - if [[ -e $B0/${V0}2/file$i.data && $BRICK2FILE = "0" ]] - then - BRICK2FILE=file$i.data - CONTINUE=$(( $CONTINUE - 1 )) - fi - - rm $M0/file$i.data - let i++ -done - - -## Bring free space on one of the bricks to less than minfree value by -## creating one big file. -dd if=/dev/zero of=$M0/fillonebrick.data bs=1024 count=25600 1>/dev/null 2>&1 - -#Lets find out where it was created -if [ -f $B0/${V0}1/fillonebrick.data ] -then - FILETOCREATE=$BRICK1FILE - OTHERBRICK=$B0/${V0}2 -else - FILETOCREATE=$BRICK2FILE - OTHERBRICK=$B0/${V0}1 -fi - -##--------------------------------TEST CASE 0----------------------------------- -## If we try to create a file which should go into full brick as per hash, it -## should go into the other brick instead. - -## Before that let us create files just to make gluster refresh the stat -## Using touch so it should not change the disk usage stats -for k in {1..20}; -do - touch $M0/dummyfile$k -done - -dd if=/dev/zero of=$M0/$FILETOCREATE bs=1024 count=2048 1>/dev/null 2>&1 -TEST [ -e $OTHERBRICK/$FILETOCREATE ] -## Done testing, lets clean up -TEST rm -rf $M0/* - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; -$CLI volume delete $V0; - -UMOUNT_LOOP ${B0}/${V0}{1,2} -rm -f ${B0}/brick{1,2} - -cleanup; diff --git a/tests/bugs/bug-859581.t b/tests/bugs/bug-859581.t deleted file mode 100755 index 08883fa3a65..00000000000 --- a/tests/bugs/bug-859581.t +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2} -EXPECT 'Created' volinfo_field $V0 'Status'; -TEST $CLI volume set $V0 performance.stat-prefetch off -TEST $CLI volume start $V0 -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST glusterfs --direct-io-mode=yes --use-readdirp=no --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; - -mkdir -p $M0/dir1/dir2 - -TEST rm -f $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") -TEST rmdir $B0/${V0}1/dir1/dir2 - -TEST stat $M0/dir1/dir2 - -TEST [ -d $B0/${V0}1/dir1/dir2 ] -TEST [ ! -d $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") ] - -# Stop the volume to flush caches and force symlink recreation -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -EXPECT 'Stopped' volinfo_field $V0 'Status'; -TEST $CLI volume start $V0 -EXPECT 'Started' volinfo_field $V0 'Status'; -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; - -# Till now, protocol/server was not doing inode linking as part of readdirp. -# But pas part of user servicable snapshots patcth, changes to do inode linking -# in protocol/server in readdirp, were introduced. So now to make sure -# the gfid handle of dir1 is healed, explicit lookup has to be sent on it. -# Otherwise, whenever ls -l is done just on the mount point $M0, lookup on the -# entries received as part of readdirp, is not sent, because the inodes for -# those entries were linked as part of readdirp itself. i.e instead of doing -# "ls -l $M0", it has to be the below command. -ls -l $M0/dir1; - -TEST [ -h $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") ] - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 - -cleanup - diff --git a/tests/bugs/bug-859927.t b/tests/bugs/bug-859927.t deleted file mode 100755 index ed74d3eb831..00000000000 --- a/tests/bugs/bug-859927.t +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -cleanup; - -glusterd; - -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -TEST ! $CLI volume set $V0 statedump-path "" -TEST ! $CLI volume set $V0 statedump-path " " -TEST $CLI volume set $V0 statedump-path "/home/" -EXPECT "/home/" volume_option $V0 server.statedump-path - -TEST ! $CLI volume set $V0 background-self-heal-count "" -TEST ! $CLI volume set $V0 background-self-heal-count " " -TEST $CLI volume set $V0 background-self-heal-count 10 -EXPECT "10" volume_option $V0 cluster.background-self-heal-count - -TEST ! $CLI volume set $V0 cache-size "" -TEST ! $CLI volume set $V0 cache-size " " -TEST $CLI volume set $V0 cache-size 512MB -EXPECT "512MB" volume_option $V0 performance.cache-size - -TEST ! $CLI volume set $V0 self-heal-daemon "" -TEST ! $CLI volume set $V0 self-heal-daemon " " -TEST $CLI volume set $V0 self-heal-daemon on -EXPECT "on" volume_option $V0 cluster.self-heal-daemon - -TEST ! $CLI volume set $V0 read-subvolume "" -TEST ! $CLI volume set $V0 read-subvolume " " -TEST $CLI volume set $V0 read-subvolume $V0-client-0 -EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume - -TEST ! $CLI volume set $V0 data-self-heal-algorithm "" -TEST ! $CLI volume set $V0 data-self-heal-algorithm " " -TEST ! $CLI volume set $V0 data-self-heal-algorithm on -TEST $CLI volume set $V0 data-self-heal-algorithm full -EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm - -TEST ! 
$CLI volume set $V0 min-free-inodes "" -TEST ! $CLI volume set $V0 min-free-inodes " " -TEST $CLI volume set $V0 min-free-inodes 60% -EXPECT "60%" volume_option $V0 cluster.min-free-inodes - -TEST ! $CLI volume set $V0 min-free-disk "" -TEST ! $CLI volume set $V0 min-free-disk " " -TEST $CLI volume set $V0 min-free-disk 60% -EXPECT "60%" volume_option $V0 cluster.min-free-disk - -TEST $CLI volume set $V0 min-free-disk 120 -EXPECT "120" volume_option $V0 cluster.min-free-disk - -TEST ! $CLI volume set $V0 frame-timeout "" -TEST ! $CLI volume set $V0 frame-timeout " " -TEST $CLI volume set $V0 frame-timeout 0 -EXPECT "0" volume_option $V0 network.frame-timeout - -TEST ! $CLI volume set $V0 auth.allow "" -TEST ! $CLI volume set $V0 auth.allow " " -TEST $CLI volume set $V0 auth.allow 192.168.122.1 -EXPECT "192.168.122.1" volume_option $V0 auth.allow - -TEST ! $CLI volume set $V0 stripe-block-size "" -TEST ! $CLI volume set $V0 stripe-block-size " " -TEST $CLI volume set $V0 stripe-block-size 512MB -EXPECT "512MB" volume_option $V0 cluster.stripe-block-size - -cleanup; diff --git a/tests/bugs/bug-860297.t b/tests/bugs/bug-860297.t deleted file mode 100644 index 2a3ca7a7a6c..00000000000 --- a/tests/bugs/bug-860297.t +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info -TEST $CLI volume create $V0 $H0:$B0/brick1 -setfattr -x trusted.glusterfs.volume-id $B0/brick1 -## If Extended attribute trusted.glusterfs.volume-id is not present -## then volume should not be able to start -TEST ! $CLI volume start $V0; -cleanup; diff --git a/tests/bugs/bug-860663.c b/tests/bugs/bug-860663.c deleted file mode 100644 index 6f6d0696e64..00000000000 --- a/tests/bugs/bug-860663.c +++ /dev/null @@ -1,42 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -int -main(argc, argv) - int argc; - char **argv; -{ - char *basepath; - char path[MAXPATHLEN + 1]; - unsigned int count; - int i, fd; - - if (argc != 3) - errx(1, "usage: %s path count", argv[0]); - - basepath = argv[1]; - count = atoi(argv[2]); - - if (count > 999999) - errx(1, "count too big"); - - if (strlen(basepath) > MAXPATHLEN - 6) - errx(1, "path too long"); - - for (i = 0; i < count; i++) { - (void)sprintf(path, "%s%06d", basepath, i); - - if ((fd = open(path, O_CREAT|O_RDWR, 0644)) == -1) - err(1, "create %s failed", path); - - if (close(fd) != 0) - warn("close %s failed", path); - } - - return 0; -} diff --git a/tests/bugs/bug-860663.t b/tests/bugs/bug-860663.t deleted file mode 100644 index 555a63436ba..00000000000 --- a/tests/bugs/bug-860663.t +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -function file_count() -{ - val=1 - - if [ "$1" == "$2" ] - then - val=0 - fi - echo $val -} - -BRICK_COUNT=3 - -build_tester $(dirname $0)/bug-860663.c - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 -TEST $CLI volume start $V0 - -## Mount FUSE -TEST glusterfs -s $H0 --volfile-id $V0 $M0; - -TEST $(dirname $0)/bug-860663 $M0/files 10000 - -ORIG_FILE_COUNT=`ls -l $M0 | wc -l`; -TEST [ $ORIG_FILE_COUNT -ge 10000 ] - -# Kill a brick process -kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`; - -TEST $CLI volume rebalance $V0 fix-layout start - -sleep 30; - -TEST ! 
$(dirname $0)/bug-860663 $M0/files 10000 - -TEST $CLI volume start $V0 force - -sleep 5; - -NEW_FILE_COUNT=`ls -l $M0 | wc -l`; - -EXPECT "0" file_count $ORIG_FILE_COUNT $NEW_FILE_COUNT - -rm -f $(dirname $0)/bug-860663 -cleanup; diff --git a/tests/bugs/bug-861015-index.t b/tests/bugs/bug-861015-index.t deleted file mode 100644 index 614d4df2fac..00000000000 --- a/tests/bugs/bug-861015-index.t +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3,4,5} -TEST $CLI volume set $V0 ensure-durability off -TEST $CLI volume start $V0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -TEST kill_brick $V0 $H0 $B0/${V0}0 -TEST kill_brick $V0 $H0 $B0/${V0}2 -TEST kill_brick $V0 $H0 $B0/${V0}4 -cd $M0 -HEAL_FILES=0 -for i in {1..10} -do - echo "abc" > $i - HEAL_FILES=$(($HEAL_FILES+1)) -done -HEAL_FILES=$(($HEAL_FILES+3)) #count brick root distribute-subvol num of times - -cd ~ -EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0 -TEST rm -f $M0/* -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume heal $V0 info -#Only root dir should be present now in the indices -EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}1 -EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}3 -EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}5 -cleanup diff --git a/tests/bugs/bug-861015-log.t b/tests/bugs/bug-861015-log.t deleted file mode 100644 index c403f552949..00000000000 --- a/tests/bugs/bug-861015-log.t +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -log_wd=$(gluster --print-logdir) -TEST glusterd -TEST pidof glusterd -rm -f $log_wd/glustershd.log -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -TEST kill_brick $V0 $H0 $B0/${V0}0 -cd $M0 -for i in {1..10} -do - dd if=/dev/urandom of=f bs=1024k count=10 2>/dev/null -done - -cd ~ -TEST $CLI volume heal $V0 info -function count_inode_link_failures { - logfile=$1 - grep "inode link failed on the inode" $logfile | wc -l -} -EXPECT "0" count_inode_link_failures $log_wd/glustershd.log -cleanup diff --git a/tests/bugs/bug-861542.t b/tests/bugs/bug-861542.t deleted file mode 100755 index 70fd12239e2..00000000000 --- a/tests/bugs/bug-861542.t +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; -# Distributed volume with a single brick was chosen solely for the ease of -#implementing the test case (to be precise, for the ease of extracting the port number). 
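An illustrative sketch, not from the original test: the sed expression inside the xml_port_field helper defined just below pulls the first <port> element out of the --xml output; assuming the xmllint on the test host supports --xpath, a hypothetical equivalent could read:

# Hypothetical alternative to xml_port_field; prints the first <port> value.
function xml_port_field_xpath()
{
    local vol=$1;
    local opt=$2;
    $CLI --xml volume status $vol $opt | xmllint --xpath 'string((//port)[1])' -;
}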
-TEST $CLI volume create $V0 $H0:$B0/brick0; - -TEST $CLI volume start $V0; - -function port_field() -{ - local vol=$1; - local opt=$2; - if [ $opt -eq '0' ]; then - $CLI volume status $vol | grep "brick0" | awk '{print $3}'; - else - $CLI volume status $vol detail | grep "^Port " | awk '{print $3}'; - fi -} - -function xml_port_field() -{ - local vol=$1; - local opt=$2; - $CLI --xml volume status $vol $opt | tr -d '\n' |\ -#Find the first occurrence of the string between and - sed -rn 's//&###/;s/<\/port>/###&/;s/^.*###(.*)###.*$/\1/p' -} - -TEST $CLI volume status $V0; -TEST $CLI volume status $V0 detail; -TEST $CLI --xml volume status $V0; -TEST $CLI --xml volume status $V0 detail; - -# Kill the brick process. After this, port number for the killed (in this case brick) process must be "N/A". -kill `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-brick0.pid` - -EXPECT "N/A" port_field $V0 '0'; # volume status -EXPECT "N/A" port_field $V0 '1'; # volume status detail - -EXPECT "N/A" xml_port_field $V0 ''; -EXPECT "N/A" xml_port_field $V0 'detail'; - -cleanup; diff --git a/tests/bugs/bug-862834.t b/tests/bugs/bug-862834.t deleted file mode 100755 index 33aaea1a8b1..00000000000 --- a/tests/bugs/bug-862834.t +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -V1="patchy2" -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; - -function check_brick() -{ - vol=$1; - num=$2 - $CLI volume info $V0 | grep "Brick$num" | awk '{print $2}'; -} - -function volinfo_field() -{ - local vol=$1; - local field=$2; - - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; -} - -function brick_count() -{ - local vol=$1; - - $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l; -} - -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT '2' brick_count $V0 - - -EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1'; -EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2'; - -TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1; - -cleanup; diff --git a/tests/bugs/bug-862967.t b/tests/bugs/bug-862967.t deleted file mode 100644 index 30d71185405..00000000000 --- a/tests/bugs/bug-862967.t +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash - -. 
$(dirname $0)/../include.rc - -cleanup; - -function uid_gid_compare() -{ - val=1 - - if [ "$1" == "$3" ] - then - if [ "$2" == "$4" ] - then - val=0 - fi - fi - echo "$val" -} - -BRICK_COUNT=3 - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 -TEST $CLI volume set $V0 stat-prefetch off -TEST $CLI volume start $V0 - -## Mount FUSE -TEST glusterfs --attribute-timeout=0 --entry-timeout=0 --gid-timeout=-1 -s $H0 --volfile-id $V0 $M0; - -# change dir permissions -mkdir $M0/dir; -chown 1:1 $M0/dir; - -# Kill a brick process - -kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`; -# change dir ownership -NEW_UID=36; -NEW_GID=36; -chown $NEW_UID:$NEW_GID $M0/dir; - -# bring the brick back up -TEST $CLI volume start $V0 force - -sleep 10; - -ls -l $M0/dir; - -# check if uid/gid is healed on backend brick which was taken down -BACKEND_UID=`stat -c %u $B0/${V0}1/dir`; -BACKEND_GID=`stat -c %g $B0/${V0}1/dir`; - - -EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID - -cleanup; diff --git a/tests/bugs/bug-864222.t b/tests/bugs/bug-864222.t deleted file mode 100755 index 35d90abac3f..00000000000 --- a/tests/bugs/bug-864222.t +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/brick0 -TEST $CLI volume start $V0 - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 nolock -cd $N0 - -TEST ls - -TEST $CLI volume set $V0 nfs.enable-ino32 on -# Main test. This should pass. -TEST ls - -cd -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 -cleanup - diff --git a/tests/bugs/bug-865825.t b/tests/bugs/bug-865825.t deleted file mode 100755 index a66ede677fb..00000000000 --- a/tests/bugs/bug-865825.t +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -## Start and create a volume -mkdir -p ${B0}/${V0}-0 -mkdir -p ${B0}/${V0}-1 -mkdir -p ${B0}/${V0}-2 -TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2} - -function volinfo_field() -{ - local vol=$1; - local field=$2; - - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; -} - - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Make sure io-cache and write-behind don't interfere. -TEST $CLI volume set $V0 cluster.background-self-heal-count 0 -TEST $CLI volume set $V0 performance.io-cache off; -TEST $CLI volume set $V0 performance.quick-read off; -TEST $CLI volume set $V0 performance.write-behind off; -TEST $CLI volume set $V0 performance.stat-prefetch off - -## Make sure automatic self-heal doesn't perturb our results. -TEST $CLI volume set $V0 cluster.self-heal-daemon off - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Mount native -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 - -## Create a file with some recognizable contents. -echo "test_data" > $M0/a_file; - -## Unmount. -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -## Mess with the flags as though brick-0 accuses brick-2 while brick-1 is -## missing its brick-2 changelog altogether. 
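For context, an illustrative note rather than part of the test: the 12-byte trusted.afr.<volume>-client-<N> values manipulated below are conventionally read as three 32-bit big-endian pending counters (data, metadata, entry), so 0x000000010000000000000000 marks one pending data operation against client-2. A quick way to inspect such changelogs directly on a brick, using only getfattr as other tests in this directory already do:

# Dump all AFR changelog xattrs of the file on the first brick, in hex.
getfattr -d -m 'trusted.afr.' -e hex $B0/${V0}-0/a_file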
-value=0x000000010000000000000000 -setfattr -n trusted.afr.${V0}-client-2 -v $value $B0/${V0}-0/a_file -setfattr -x trusted.afr.${V0}-client-2 $B0/${V0}-1/a_file -echo "wrong_data" > $B0/${V0}-2/a_file - -gluster volume set $V0 cluster.self-heal-daemon on -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 -gluster volume heal $V0 full - -## Make sure brick 2 now has the correct contents. -EXPECT_WITHIN $HEAL_TIMEOUT "test_data" cat $B0/${V0}-2/a_file - -if [ "$EXIT_EARLY" = "1" ]; then - exit 0; -fi - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-866459.t b/tests/bugs/bug-866459.t deleted file mode 100644 index fe92f3a4ec5..00000000000 --- a/tests/bugs/bug-866459.t +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - - -## Start and create a volume -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -## Create and start a volume with aio enabled -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; -TEST $CLI volume set $V0 linux-aio on -TEST $CLI volume set $V0 background-self-heal-count 0 -TEST $CLI volume set $V0 performance.stat-prefetch off; -TEST $CLI volume start $V0 - -## Mount FUSE with caching disabled -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; - -dd of=$M0/a if=/dev/urandom bs=1024k count=1 2>&1 > /dev/null -B0_hiphenated=`echo $B0 | tr '/' '-'` -## Bring a brick down -TEST kill_brick $V0 $H0 $B0/${V0}1 -EXPECT '1' echo `pgrep glusterfsd | wc -l` -## Rewrite the file -dd of=$M0/a if=/dev/urandom bs=1024k count=1 2>&1 > /dev/null -TEST $CLI volume start $V0 force -## Wait for the brick to give CHILD_UP in client protocol -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -md5offile2=`md5sum $B0/${V0}2/a | awk '{print $1}'` - -##trigger self-heal -ls -l $M0/a - -EXPECT "$md5offile2" echo `md5sum $B0/${V0}1/a | awk '{print $1}'` - -## Finish up -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-867252.t b/tests/bugs/bug-867252.t deleted file mode 100644 index 17edcd9c5dc..00000000000 --- a/tests/bugs/bug-867252.t +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/${V0}1; - - -function volinfo_field() -{ - local vol=$1; - local field=$2; - - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; -} - - -function brick_count() -{ - local vol=$1; - - $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l; -} - - -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT '1' brick_count $V0 - -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2; -EXPECT '2' brick_count $V0 - -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force; -EXPECT '1' brick_count $V0 - -cleanup; diff --git a/tests/bugs/bug-867253.t b/tests/bugs/bug-867253.t deleted file mode 100644 index bd3f6788975..00000000000 --- a/tests/bugs/bug-867253.t +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../nfs.rc - -# Skip the entire test if /proc/sys/vm/drop_caches does not exist -if [ ! -f /proc/sys/vm/drop_caches ] ; then - echo "Skip test using /proc/sys/vm/drop_caches, "\ - "which does not exists on this system" >&2 - SKIP_TESTS - exit 0 -fi - -cleanup; - -function file_count() -{ - val=1 - - if [ "$1" == "0" ] - then - if [ "$2" == "0" ] - then - val=0 - fi - fi - echo $val -} - -BRICK_COUNT=2 - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 -TEST $CLI volume start $V0 - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -## Mount nfs, with nocache option -TEST mount_nfs $H0:/$V0 $M0 nolock,noac; - -touch $M0/files{1..1000}; - -# Kill a brick process -kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}0.pid`; - -echo 3 >/proc/sys/vm/drop_caches; - -ls -l $M0 >/dev/null; - -NEW_FILE_COUNT=`echo $?`; - -TEST $CLI volume start $V0 force - -# Kill a brick process -kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`; - -echo 3 >/proc/sys/vm/drop_caches; - -ls -l $M0 >/dev/null; - -NEW_FILE_COUNT1=`echo $?`; - -EXPECT "0" file_count $NEW_FILE_COUNT $NEW_FILE_COUNT1 - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -cleanup diff --git a/tests/bugs/bug-869724.t b/tests/bugs/bug-869724.t deleted file mode 100644 index eec5d344c10..00000000000 --- a/tests/bugs/bug-869724.t +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - - -## Start and create a volume -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}1; - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - - -## Make volume tightly consistent for metdata -TEST $CLI volume set $V0 performance.stat-prefetch off; - -## Mount FUSE with caching disabled -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; - -touch $M0/test; -build_tester $(dirname $0)/getlk_owner.c - -TEST $(dirname $0)/getlk_owner $M0/test; - -rm -f $(dirname $0)/getlk_owner -cleanup; - diff --git a/tests/bugs/bug-872923.t b/tests/bugs/bug-872923.t deleted file mode 100755 index afbf6aca222..00000000000 --- a/tests/bugs/bug-872923.t +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../nfs.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info -TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1 -TEST $CLI volume start $V0 - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 nolock - -cd $N0 -mkdir test_hardlink_self_heal; -cd test_hardlink_self_heal; - -for i in `seq 1 5`; -do - mkdir dir.$i; - for j in `seq 1 10`; - do - dd if=/dev/zero of=dir.$i/file.$j bs=1k count=$j > /dev/null 2>&1; - done; -done; - -cd .. -TEST kill_brick $V0 $H0 $B0/brick0 -cd test_hardlink_self_heal; - -RET=0 -for i in `seq 1 5`; -do - for j in `seq 1 10`; - do - ln dir.$i/file.$j dir.$i/link_file.$j > /dev/null 2>&1; - RET=$? 
- if [ $RET -ne 0 ]; then - break; - fi - done ; - if [ $RET -ne 0 ]; then - break; - fi -done; - -cd -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -EXPECT "0" echo $RET; - -cleanup; diff --git a/tests/bugs/bug-873367.t b/tests/bugs/bug-873367.t deleted file mode 100755 index 771c8628219..00000000000 --- a/tests/bugs/bug-873367.t +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -SSL_BASE=/etc/ssl -SSL_KEY=$SSL_BASE/glusterfs.key -SSL_CERT=$SSL_BASE/glusterfs.pem -SSL_CA=$SSL_BASE/glusterfs.ca - -cleanup; -rm -f $SSL_BASE/glusterfs.* -mkdir -p $B0/1 -mkdir -p $M0 - -TEST openssl genrsa -out $SSL_KEY 1024 -TEST openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT -ln $SSL_CERT $SSL_CA - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/1 -TEST $CLI volume set $V0 server.ssl on -TEST $CLI volume set $V0 client.ssl on -TEST $CLI volume set $V0 ssl.certificate-depth 6 -TEST $CLI volume set $V0 ssl.cipher-list HIGH -TEST $CLI volume set $V0 auth.ssl-allow Anyone -TEST $CLI volume start $V0 - -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 -echo some_data > $M0/data_file -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -# If the bug is not fixed, the next mount will fail. - -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 -EXPECT some_data cat $M0/data_file - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 - -cleanup; diff --git a/tests/bugs/bug-873549.t b/tests/bugs/bug-873549.t deleted file mode 100644 index 5b541de6c65..00000000000 --- a/tests/bugs/bug-873549.t +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -LDEBUG; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; - -TEST $CLI volume set $V0 performance.cache-size 512MB -TEST $CLI volume start $V0 -TEST $CLI volume statedump $V0 all - -cleanup; diff --git a/tests/bugs/bug-873962-spb.t b/tests/bugs/bug-873962-spb.t deleted file mode 100644 index 2821952ac9a..00000000000 --- a/tests/bugs/bug-873962-spb.t +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume set $V0 performance.quick-read off -TEST $CLI volume set $V0 performance.io-cache off -TEST $CLI volume set $V0 performance.write-behind off -TEST $CLI volume set $V0 performance.stat-prefetch off -TEST $CLI volume set $V0 performance.read-ahead off -TEST $CLI volume set $V0 cluster.background-self-heal-count 0 -TEST $CLI volume start $V0 -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable -touch $M0/a - -exec 5<$M0/a - -kill_brick $V0 $H0 $B0/${V0}0 -echo "hi" > $M0/a -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 - -kill_brick $V0 $H0 $B0/${V0}1 -echo "bye" > $M0/a -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 - -TEST ! cat $M0/a #To mark split-brain - -TEST ! 
read -u 5 line -exec 5<&- - -cleanup; diff --git a/tests/bugs/bug-873962.t b/tests/bugs/bug-873962.t deleted file mode 100755 index bd3132ce7f0..00000000000 --- a/tests/bugs/bug-873962.t +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash - -#AFR TEST-IDENTIFIER SPLIT-BRAIN -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -B0_hiphenated=`echo $B0 | tr '/' '-'` -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2} - -# If we allow self-heal to happen in the background, we'll get spurious -# failures - especially at the point labeled "FAIL HERE" but -# occasionally elsewhere. This behavior is very timing-dependent. It -# doesn't show up in Jenkins, but it does on JD's and KP's machines, and -# it got sharply worse because of an unrelated fsync change (6ae6f3d) -# which changed timing. Putting anything at the FAIL HERE marker tends -# to make it go away most of the time on affected machines, even if the -# "anything" is unrelated. -# -# What's going on is that the I/O on the first mountpoint is allowed to -# complete even though self-heal is still in progress and the state on -# disk does not reflect its result. In fact, the state changes during -# self-heal create the appearance of split brain when the second I/O -# comes in, so that fails even though we haven't actually been in split -# brain since the manual xattr operations. By disallowing background -# self-heal, we ensure that the second I/O can't happen before self-heal -# is complete, because it has to follow the first I/O which now has to -# follow self-heal. -TEST $CLI volume set $V0 cluster.background-self-heal-count 0 - -#Make sure self-heal is not triggered when the bricks are re-started -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume set $V0 performance.stat-prefetch off -TEST $CLI volume start $V0 -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable -TEST touch $M0/a -TEST touch $M0/b -TEST touch $M0/c -TEST touch $M0/d -echo "1" > $M0/b -echo "1" > $M0/d -TEST kill_brick $V0 $H0 $B0/${V0}2 -echo "1" > $M0/a -echo "1" > $M0/c -TEST setfattr -n trusted.mdata -v abc $M0/b -TEST setfattr -n trusted.mdata -v abc $M0/d -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 -TEST kill_brick $V0 $H0 $B0/${V0}1 -echo "2" > $M0/a -echo "2" > $M0/c -TEST setfattr -n trusted.mdata -v def $M0/b -TEST setfattr -n trusted.mdata -v def $M0/d -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 - -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M1 --direct-io-mode=enable - -#Files are in split-brain, so open should fail -TEST ! cat $M0/a; -TEST ! cat $M1/a; -TEST cat $M0/b; -TEST cat $M1/b; - -#Reset split-brain status -TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/a; -TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/b; - -#The operations should do self-heal and give correct output -EXPECT "2" cat $M0/a; -# FAIL HERE - see comment about cluster.self-heal-background-count above. 
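A hedged aside: instead of relying purely on operation ordering, other tests in this directory synchronise on heal state by polling with EXPECT_WITHIN (for example via afr_get_pending_heal_count from volume.rc); a comparable guard, shown only as an illustration, might look like:

# Illustrative only: wait for pending heal entries to drain before the next check.
EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0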
-EXPECT "2" cat $M1/a; -TEST dd if=$M0/b of=/dev/null bs=1024k -EXPECT "def" getfattr -n trusted.mdata --only-values $M0/b 2>/dev/null -EXPECT "def" getfattr -n trusted.mdata --only-values $M1/b 2>/dev/null - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1 - -TEST $CLI volume set $V0 cluster.data-self-heal off -TEST $CLI volume set $V0 cluster.metadata-self-heal off - -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M1 --direct-io-mode=enable - -#Files are in split-brain, so open should fail -TEST ! cat $M0/c -TEST ! cat $M1/c -TEST cat $M0/d -TEST cat $M1/d - -TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/c -TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/d - -#The operations should NOT do self-heal but give correct output -EXPECT "2" cat $M0/c -EXPECT "2" cat $M1/c -EXPECT "1" cat $M0/d -EXPECT "1" cat $M1/d - -cleanup; diff --git a/tests/bugs/bug-874498.t b/tests/bugs/bug-874498.t deleted file mode 100644 index 843698d2a61..00000000000 --- a/tests/bugs/bug-874498.t +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../afr.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; -TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2; -TEST $CLI volume start $V0; - - -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; -B0_hiphenated=`echo $B0 | tr '/' '-'` -kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0$B0_hiphenated-brick1.pid` ; - -echo "GLUSTER FILE SYSTEM" > $M0/FILE1 -echo "GLUSTER FILE SYSTEM" > $M0/FILE2 - -FILEN=$B0"/brick2" -XATTROP=$FILEN/.glusterfs/indices/xattrop - -function get_gfid() -{ -path_of_file=$1 - -gfid_value=`getfattr -d -m . $path_of_file -e hex 2>/dev/null | grep trusted.gfid | cut --complement -c -15 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'` - -echo $gfid_value -} - -GFID_ROOT=`get_gfid $B0/brick2` -GFID_FILE1=`get_gfid $B0/brick2/FILE1` -GFID_FILE2=`get_gfid $B0/brick2/FILE2` - - -count=0 -for i in `ls $XATTROP` -do - if [ "$i" == "$GFID_ROOT" ] || [ "$i" == "$GFID_FILE1" ] || [ "$i" == "$GFID_FILE2" ] - then - count=$(( count + 1 )) - fi -done - -EXPECT "3" echo $count - - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -TEST $CLI volume heal $V0 - - -##Expected number of entries are 0 in the .glusterfs/indices/xattrop directory -EXPECT_WITHIN $HEAL_TIMEOUT '1' count_sh_entries $FILEN; - -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-877293.t b/tests/bugs/bug-877293.t deleted file mode 100755 index d9bd8a33b12..00000000000 --- a/tests/bugs/bug-877293.t +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -TEST glusterd -TEST pidof glusterd - -## Start and create a replicated volume -mkdir -p ${B0}/${V0}-0 -mkdir -p ${B0}/${V0}-1 -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} - -TEST $CLI volume set $V0 indexing on - -TEST $CLI volume start $V0; - -## Mount native -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 - -## Mount client-pid=-1 -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 --client-pid=-1 $M1 - -TEST touch $M0 - -vol_uuid=`getfattr -n trusted.glusterfs.volume-mark -ehex $M1 | sed -n 's/^trusted.glusterfs.volume-mark=0x//p' | cut -b5-36 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'` -xtime=trusted.glusterfs.$vol_uuid.xtime - -TEST "getfattr -n $xtime $M1 | grep -q ${xtime}=" - -TEST kill_brick $V0 $H0 $B0/${V0}-0 - -TEST "getfattr -n $xtime $M1 | grep -q ${xtime}=" - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1 - -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup diff --git a/tests/bugs/bug-877885.t b/tests/bugs/bug-877885.t deleted file mode 100755 index 01b645e7d4c..00000000000 --- a/tests/bugs/bug-877885.t +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1 -TEST $CLI volume start $V0 - -## Mount FUSE with caching disabled -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \ -$M0; - -TEST touch $M0/file -TEST mkdir $M0/dir - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 nolock -cd $N0 - -rm -rf * & - -TEST mount_nfs $H0:/$V0 $N1 retry=0,nolock; - -cd; - -kill %1; - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N1 - -cleanup diff --git a/tests/bugs/bug-877992.t b/tests/bugs/bug-877992.t deleted file mode 100755 index 979cbfd3fdc..00000000000 --- a/tests/bugs/bug-877992.t +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - - -## Start and create a volume -TEST glusterd -LDEBUG -TEST pidof glusterd - - -function volinfo_field() -{ - local vol=$1; - local field=$2; - - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; -} - - -function hooks_prep () -{ - local event=$1 - touch /tmp/pre.out /tmp/post.out - touch $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh - touch $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh - - printf "#! /bin/bash\necho "$event"Pre > /tmp/pre.out\n" > $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh - printf "#! 
/bin/bash\necho "$event"Post > /tmp/post.out\n" > $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh - chmod a+x $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh - chmod a+x $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh -} - -function hooks_cleanup () -{ - local event=$1 - rm /tmp/pre.out /tmp/post.out - rm $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh - rm $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh -} - -## Verify volume is created and its hooks script ran -hooks_prep 'create' -TEST $CLI volume create $V0 $H0:$B0/${V0}1; -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT 'createPre' cat /tmp/pre.out; -EXPECT 'createPost' cat /tmp/post.out; -hooks_cleanup 'create' - - -## Start volume and verify that its hooks script ran -hooks_prep 'start' -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; -EXPECT 'startPre' cat /tmp/pre.out; -EXPECT 'startPost' cat /tmp/post.out; -hooks_cleanup 'start' - -cleanup; diff --git a/tests/bugs/bug-878004.t b/tests/bugs/bug-878004.t deleted file mode 100644 index 407fd6eccec..00000000000 --- a/tests/bugs/bug-878004.t +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3; - -function brick_count() -{ - local vol=$1; - - $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l; -} - - -TEST $CLI volume start $V0 -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force; -EXPECT '2' brick_count $V0 - -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 force; -EXPECT '1' brick_count $V0 - -cleanup; - diff --git a/tests/bugs/bug-879490.t b/tests/bugs/bug-879490.t deleted file mode 100755 index 5b9ae7bb9b0..00000000000 --- a/tests/bugs/bug-879490.t +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -function peer_probe() -{ - $CLI peer probe a.b.c.d --xml | xmllint --format - | grep "" -} - -EXPECT " Probe returned with unknown errno 107" peer_probe - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-879494.t b/tests/bugs/bug-879494.t deleted file mode 100755 index 5caca7922b1..00000000000 --- a/tests/bugs/bug-879494.t +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -function peer_probe() -{ - $CLI peer detach a.b.c.d --xml | xmllint --format - | grep "" -} - -EXPECT " a.b.c.d is not part of cluster" peer_probe - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-880898.t b/tests/bugs/bug-880898.t deleted file mode 100644 index 4b9fb50a522..00000000000 --- a/tests/bugs/bug-880898.t +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2 -TEST $CLI volume start $V0 -pkill glusterfs -uuid="" -for line in $(cat $GLUSTERD_WORKDIR/glusterd.info) -do - if [[ $line == UUID* ]] - then - uuid=`echo $line | sed -r 's/^.{5}//'` - fi -done - -#Command execution should fail reporting that the bricks are not running. -TEST ! $CLI volume heal $V0 info - -cleanup; diff --git a/tests/bugs/bug-882278.t b/tests/bugs/bug-882278.t deleted file mode 100755 index 1a31cd9fa20..00000000000 --- a/tests/bugs/bug-882278.t +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -cleanup - -# Is there a good reason to require --fqdn elsewhere? It's worse than useless -# here. -H0=$(hostname -s) - -function recreate { - # The rm is necessary so we don't get fooled by leftovers from old runs. - rm -rf $1 && mkdir -p $1 -} - -function count_lines { - grep "$1" $2/* | wc -l -} - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -## Start and create a volume -TEST recreate ${B0}/${V0}-0 -TEST recreate ${B0}/${V0}-1 -TEST $CLI volume create $V0 $H0:$B0/${V0}-{0,1} -TEST $CLI volume set $V0 cluster.nufa on - -function volinfo_field() -{ - local vol=$1; - local field=$2; - - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; -} - - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Mount native -special_option="--xlator-option ${V0}-dht.local-volume-name=${V0}-client-1" -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $special_option $M0 - -## Create a bunch of test files. -for i in $(seq 0 99); do - echo hello > $(printf $M0/file%02d $i) -done - -## Make sure the files went to the right place. There might be link files in -## the other brick, but they won't have any contents. -EXPECT "0" count_lines hello ${B0}/${V0}-0 -EXPECT "100" count_lines hello ${B0}/${V0}-1 - -if [ "$EXIT_EARLY" = "1" ]; then - exit 0; -fi - -## Finish up -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-884328.t b/tests/bugs/bug-884328.t deleted file mode 100644 index ee5509bbc1d..00000000000 --- a/tests/bugs/bug-884328.t +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -. 
$(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; -TEST glusterd -TEST pidof glusterd - -TEST check_option_help_presence "cluster.quorum-type" -TEST check_option_help_presence "cluster.quorum-count" -cleanup; diff --git a/tests/bugs/bug-884452.t b/tests/bugs/bug-884452.t deleted file mode 100644 index aa2921289c2..00000000000 --- a/tests/bugs/bug-884452.t +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/$V0 -TEST $CLI volume start $V0 - -TEST glusterfs -s $H0 --volfile-id $V0 $M0 -TEST touch $M0/{1..10000} - -RUN_LS_LOOP_FILE="$M0/run-ls-loop" -function ls-loop -{ - while [ -f $RUN_LS_LOOP_FILE ]; do - ls -lR $M0 1>/dev/null 2>&1 - done; -} - -touch $RUN_LS_LOOP_FILE -ls-loop & - -function vol-status-loop -{ - for i in {1..1000}; do - $CLI volume status $V0 clients >/dev/null 2>&1 - if [ $? -ne 0 ]; then - return 1 - fi - done; - - return 0 -} - -TEST vol-status-loop - -rm -f $RUN_LS_LOOP_FILE -wait - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -cleanup; diff --git a/tests/bugs/bug-884455.t b/tests/bugs/bug-884455.t deleted file mode 100755 index e63af4334ae..00000000000 --- a/tests/bugs/bug-884455.t +++ /dev/null @@ -1,84 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../dht.rc - -cleanup; - -function layout_compare() -{ - res=0 - - if [ "$1" == "$2" ] - then - res=1 - fi - if [ "$1" == "$3" ] - then - res=1 - fi - if [ "$2" == "$3" ] - then - res=1 - fi - - echo $res -} - -function get_layout() -{ - layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1|grep dht |cut -d = -f2` - layout2=`getfattr -n trusted.glusterfs.dht -e hex $2 2>&1|grep dht |cut -d = -f2` - layout3=`getfattr -n trusted.glusterfs.dht -e hex $3 2>&1|grep dht |cut -d = -f2` - - ret=$(layout_compare $layout1 $layout2 $layout3) - - if [ $ret -ne 0 ] - then - echo 1 - else - echo 0 - fi - -} - -BRICK_COUNT=3 - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 -## set subvols-per-dir option -TEST $CLI volume set $V0 subvols-per-directory 2 -TEST $CLI volume start $V0 - -## Mount FUSE -TEST glusterfs -s $H0 --volfile-id $V0 $M0; - -TEST mkdir $M0/dir{1..10} 2>/dev/null; - -## Add-brick n run rebalance to force re-write of layout -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2 -sleep 5; - -## trigger dir self heal on client -TEST ls -l $M0 2>/dev/null; - -TEST $CLI volume rebalance $V0 start force - -EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed - -## check for layout overlaps. 
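As a side note, the ten per-directory assertions that follow repeat the same get_layout call and could equivalently be written as a loop (shown here only as an illustration, using the same helper and directories as the explicit list below):

for d in dir{1..10}; do
    # One layout-overlap check per directory, identical to the explicit list below.
    EXPECT "0" get_layout $B0/${V0}0/$d $B0/${V0}1/$d $B0/${V0}2/$d
done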
-EXPECT "0" get_layout $B0/${V0}0 $B0/${V0}1 $B0/${V0}2 -EXPECT "0" get_layout $B0/${V0}0/dir1 $B0/${V0}1/dir1 $B0/${V0}2/dir1 -EXPECT "0" get_layout $B0/${V0}0/dir2 $B0/${V0}1/dir2 $B0/${V0}2/dir2 -EXPECT "0" get_layout $B0/${V0}0/dir3 $B0/${V0}1/dir3 $B0/${V0}2/dir3 -EXPECT "0" get_layout $B0/${V0}0/dir4 $B0/${V0}1/dir4 $B0/${V0}2/dir4 -EXPECT "0" get_layout $B0/${V0}0/dir5 $B0/${V0}1/dir5 $B0/${V0}2/dir5 -EXPECT "0" get_layout $B0/${V0}0/dir6 $B0/${V0}1/dir6 $B0/${V0}2/dir6 -EXPECT "0" get_layout $B0/${V0}0/dir7 $B0/${V0}1/dir7 $B0/${V0}2/dir7 -EXPECT "0" get_layout $B0/${V0}0/dir8 $B0/${V0}1/dir8 $B0/${V0}2/dir8 -EXPECT "0" get_layout $B0/${V0}0/dir9 $B0/${V0}1/dir9 $B0/${V0}2/dir9 -EXPECT "0" get_layout $B0/${V0}0/dir10 $B0/${V0}1/dir10 $B0/${V0}2/dir10 - -cleanup; diff --git a/tests/bugs/bug-884597.t b/tests/bugs/bug-884597.t deleted file mode 100755 index 0daa025df34..00000000000 --- a/tests/bugs/bug-884597.t +++ /dev/null @@ -1,173 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../dht.rc -. $(dirname $0)/../volume.rc - -cleanup; -BRICK_COUNT=3 -function uid_gid_compare() -{ - val=1 - - if [ "$1" == "$3" ] - then - if [ "$2" == "$4" ] - then - val=0 - fi - fi - echo "$val" -} - - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 -TEST $CLI volume start $V0 - -## Mount FUSE -TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0; - -i=1 -NEW_UID=36 -NEW_GID=36 - -TEST touch $M0/$i - -chown $NEW_UID:$NEW_GID $M0/$i -## rename till file gets a linkfile - -has_link=0 -while [ $i -lt 100 ] -do - mv $M0/$i $M0/$(( $i+1 )) - if [ $? -ne 0 ] - then - break - fi - let i++ - file_has_linkfile $i - has_link=$? - if [ $has_link -eq 2 ] - then - break; - fi -done - -TEST [ $has_link -eq 2 ] - -get_hashed_brick $i -cached=$? - -# check if uid/gid on linkfile is created with correct uid/gid -BACKEND_UID=`stat -c %u $B0/${V0}$cached/$i`; -BACKEND_GID=`stat -c %g $B0/${V0}$cached/$i`; - -EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID - -# remove linkfile from backend, and trigger a lookup heal. uid/gid should match -rm -rf $B0/${V0}$cached/$i - -# without a unmount, we are not able to trigger a lookup based heal - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -## Mount FUSE -TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0; - -lookup=`ls -l $M0/$i 2>/dev/null` - -# check if uid/gid on linkfile is created with correct uid/gid -BACKEND_UID=`stat -c %u $B0/${V0}$cached/$i`; -BACKEND_GID=`stat -c %g $B0/${V0}$cached/$i`; - -EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID -# create hardlinks. Make sure a linkfile gets created - -i=1 -NEW_UID=36 -NEW_GID=36 - -TEST touch $M0/file -chown $NEW_UID:$NEW_GID $M0/file; - -## ln till file gets a linkfile - -has_link=0 -while [ $i -lt 100 ] -do - ln $M0/file $M0/link$i - if [ $? -ne 0 ] - then - break - fi - file_has_linkfile link$i - has_link=$? - if [ $has_link -eq 2 ] - then - break; - fi - let i++ -done - -TEST [ $has_link -eq 2 ] - -get_hashed_brick link$i -cached=$? 
- -# check if uid/gid on linkfile is created with correct uid/gid -BACKEND_UID=`stat -c %u $B0/${V0}$cached/link$i`; -BACKEND_GID=`stat -c %g $B0/${V0}$cached/link$i`; - -EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID - -## UID/GID creation as different user -i=1 -NEW_UID=36 -NEW_GID=36 - -TEST touch $M0/user_file1 -TEST chown $NEW_UID:$NEW_GID $M0/user_file1; - -## Give permission on volume, so that different users can perform rename - -TEST chmod 0777 $M0 - -## Add a user known as ABC and perform renames -TEST `useradd -M ABC 2>/dev/null` - -TEST cd $M0 -## rename as different user till file gets a linkfile - -has_link=0 -while [ $i -lt 100 ] -do - su -m ABC -c "mv $M0/user_file$i $M0/user_file$(( $i+1 ))" - if [ $? -ne 0 ] - then - break - fi - let i++ - file_has_linkfile user_file$i - has_link=$? - if [ $has_link -eq 2 ] - then - break; - fi -done - -TEST [ $has_link -eq 2 ] - -## del user ABC -TEST userdel ABC - -get_hashed_brick user_file$i -cached=$? - -# check if uid/gid on linkfile is created with correct uid/gid -BACKEND_UID=`stat -c %u $B0/${V0}$cached/user_file$i`; -BACKEND_GID=`stat -c %g $B0/${V0}$cached/user_file$i`; - -EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID -cleanup; diff --git a/tests/bugs/bug-886998.t b/tests/bugs/bug-886998.t deleted file mode 100644 index 7a905a11391..00000000000 --- a/tests/bugs/bug-886998.t +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -# This tests that the replicate trash directory(.landfill) has following -# properties. -# Note: This is to have backward compatibility with 3.3 glusterfs -# In the latest releases this dir is present inside .glusterfs of brick. -# 1) lookup of trash dir fails -# 2) readdir does not show this directory -# 3) Self-heal does not do any self-heal of these directories. -gfid1="0xc2e75dde97f346e7842d1076a8e699f8" -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable - -TEST mkdir $B0/${V0}1/.landfill -TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}1/.landfill -TEST mkdir $B0/${V0}0/.landfill -TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/.landfill - -TEST ! stat $M0/.landfill -EXPECT "" echo $(ls -a $M0 | grep ".landfill") - -TEST rmdir $B0/${V0}0/.landfill -#Force a conservative merge and it should not create .landfill -TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/ -TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/ - -TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/ -TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/ - -EXPECT "" echo $(ls -a $M0 | grep ".landfill") -TEST ! stat $B0/${V0}0/.landfill -TEST stat $B0/${V0}1/.landfill - -#TEST that the dir is not deleted even when xattrs suggest to delete -TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/ -TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/ - -TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/ -TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/ - -EXPECT "" echo $(ls -a $M0 | grep ".landfill") -TEST ! 
stat $B0/${V0}0/.landfill -TEST stat $B0/${V0}1/.landfill -cleanup; diff --git a/tests/bugs/bug-887098-gmount-crash.t b/tests/bugs/bug-887098-gmount-crash.t deleted file mode 100644 index 5a6a9fdb931..00000000000 --- a/tests/bugs/bug-887098-gmount-crash.t +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST glusterfs -s $H0 --volfile-id=$V0 --acl $M0 -MOUNT_PID=$(get_mount_process_pid $V0) - -for i in {1..25}; -do - mkdir $M0/tmp_$i && cat /etc/hosts > $M0/tmp_$i/file - cp -RPp $M0/tmp_$i $M0/newtmp_$i && cat /etc/hosts > $M0/newtmp_$i/newfile -done - -EXPECT "$MOUNT_PID" get_mount_process_pid $V0 -TEST rm -rf $M0/* -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-887145.t b/tests/bugs/bug-887145.t deleted file mode 100755 index 554e6ce95d6..00000000000 --- a/tests/bugs/bug-887145.t +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; -TEST $CLI volume set $V0 performance.open-behind off; -TEST $CLI volume start $V0 - -## Mount FUSE with caching disabled -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; - - -useradd tmp_user 2>/dev/null 1>/dev/null; -mkdir $M0/dir; -mkdir $M0/other; -cp /etc/passwd $M0/; -cp $M0/passwd $M0/file; -chmod 600 $M0/file; - -TEST mount_nfs $H0:/$V0 $N0 nolock; - -chown -R nfsnobody:nfsnobody $M0/dir; -chown -R tmp_user:tmp_user $M0/other; - -TEST $CLI volume set $V0 server.root-squash on; - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; - -# create files and directories in the root of the glusterfs and nfs mount -# which is owned by root and hence the right behavior is getting EACCESS -# as the fops are executed as nfsnobody. -touch $M0/foo 2>/dev/null; -TEST [ $? -ne 0 ] -touch $N0/foo 2>/dev/null; -TEST [ $? -ne 0 ] -mkdir $M0/new 2>/dev/null; -TEST [ $? -ne 0 ] -mkdir $N0/new 2>/dev/null; -TEST [ $? -ne 0 ] -cp $M0/file $M0/tmp_file 2>/dev/null; -TEST [ $? -ne 0 ] -cp $N0/file $N0/tmp_file 2>/dev/null; -TEST [ $? -ne 0 ] -cat $M0/file 2>/dev/null; -TEST [ $? -ne 0 ] -# here read should be allowed because eventhough file "passwd" is owned -# by root, the permissions if the file allow other users to read it. -cat $M0/passwd 1>/dev/null; -TEST [ $? -eq 0 ] -cat $N0/passwd 1>/dev/null; -TEST [ $? -eq 0 ] - -# create files and directories should succeed as the fops are being executed -# inside the directory owned by nfsnobody -TEST touch $M0/dir/file; -TEST touch $N0/dir/foo; -TEST mkdir $M0/dir/new; -TEST mkdir $N0/dir/other; -TEST rm -f $M0/dir/file $M0/dir/foo; -TEST rmdir $N0/dir/*; - -# create files and directories here should fail as other directory is owned -# by tmp_user. -touch $M0/other/foo 2>/dev/null; -TEST [ $? 
-ne 0 ] -touch $N0/other/foo 2>/dev/null; -TEST [ $? -ne 0 ] -mkdir $M0/other/new 2>/dev/null; -TEST [ $? -ne 0 ] -mkdir $N0/other/new 2>/dev/null; -TEST [ $? -ne 0 ] - -userdel tmp_user; -rm -rf /home/tmp_user; - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-888174.t b/tests/bugs/bug-888174.t deleted file mode 100644 index d45124d0dee..00000000000 --- a/tests/bugs/bug-888174.t +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#This tests if flush, fsync wakes up the delayed post-op or not. -#If it is not woken up, INODELK from the next command waits -#for post-op-delay secs. There would be pending changelog even after the command -#completes. - -cleanup; -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1 - -TEST $CLI volume set $V0 cluster.eager-lock on - -TEST $CLI volume set $V0 performance.flush-behind off -EXPECT "off" volume_option $V0 performance.flush-behind - -TEST $CLI volume set $V0 cluster.post-op-delay-secs 3 -EXPECT "3" volume_option $V0 cluster.post-op-delay-secs - -TEST $CLI volume start $V0 -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 - -#Check that INODELK MAX latency is not in the order of seconds -TEST gluster volume profile $V0 start -for i in {1..5} -do - echo hi > $M0/a -done -#Test if the MAX INODELK fop latency is of the order of seconds. -inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}") - -TEST [ -z $inodelk_max_latency ] - -TEST dd of=$M0/a if=/dev/urandom bs=1024k count=10 conv=fsync -#Check for no trace of pending changelog. Flush should make sure of it. -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.dirty -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.dirty - - -dd of=$M0/a if=/dev/urandom bs=1024k count=1024 2>/dev/null & -p=$! -#trigger graph switches, tests for fsync not leaving any pending flags -TEST $CLI volume set $V0 performance.quick-read off -TEST $CLI volume set $V0 performance.io-cache off -TEST $CLI volume set $V0 performance.stat-prefetch off -TEST $CLI volume set $V0 performance.read-ahead off - -kill -TERM $p -#wait for dd to exit -wait > /dev/null 2>&1 - -#Goal is to check if there is permanent FOOL changelog -sleep 5 -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.dirty -EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.dirty - -cleanup; diff --git a/tests/bugs/bug-888752.t b/tests/bugs/bug-888752.t deleted file mode 100644 index b82c0ddb33a..00000000000 --- a/tests/bugs/bug-888752.t +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../cluster.rc - -# Check if xml output is generated correctly for volume status for a single brick -# present on another peer and no async tasks are running. 
- -function get_peer_count { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} -cleanup - -TEST launch_cluster 2; -TEST $CLI_1 peer probe $H2; -EXPECT_WITHIN $PROBE_TIMEOUT 1 get_peer_count -TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 -TEST $CLI_1 volume start $V0 - -TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml - -TEST $CLI_1 volume stop $V0 - -cleanup diff --git a/tests/bugs/bug-889630.t b/tests/bugs/bug-889630.t deleted file mode 100755 index d2fcc10a4d4..00000000000 --- a/tests/bugs/bug-889630.t +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../cluster.rc - -function check_peers { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} - -function volume_count { - local cli=$1; - if [ $cli -eq '1' ] ; then - $CLI_1 volume info | grep 'Volume Name' | wc -l; - else - $CLI_2 volume info | grep 'Volume Name' | wc -l; - fi -} - -cleanup; - -TEST launch_cluster 2; -TEST $CLI_1 peer probe $H2; - -EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers - -TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 -TEST $CLI_1 volume start $V0 - -b="B1"; - -#Create an extra file in the originator's volume store -touch ${!b}/glusterd/vols/$V0/run/file - -TEST $CLI_1 volume stop $V0 -#Test for self-commit failure -TEST $CLI_1 volume delete $V0 - -#Check whether delete succeeded on both the nodes -EXPECT "0" volume_count '1' -EXPECT "0" volume_count '2' - -#Check whether the volume name can be reused after deletion -TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1 -TEST $CLI_1 volume start $V0 - -#Create an extra file in the peer's volume store -touch ${!b}/glusterd/vols/$V0/run/file - -TEST $CLI_1 volume stop $V0 -#Test for commit failure on the other node -TEST $CLI_2 volume delete $V0 - -EXPECT "0" volume_count '1'; -EXPECT "0" volume_count '2'; - -cleanup; diff --git a/tests/bugs/bug-889996.t b/tests/bugs/bug-889996.t deleted file mode 100644 index 6b07d8918d0..00000000000 --- a/tests/bugs/bug-889996.t +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; - -rm -rf $B0/${V0}1; - -TEST ! $CLI volume start $V0; -EXPECT 0 online_brick_count; - -cleanup; diff --git a/tests/bugs/bug-892730.t b/tests/bugs/bug-892730.t deleted file mode 100755 index 2a497d0095e..00000000000 --- a/tests/bugs/bug-892730.t +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash -# -# Bug 892730 - Verify that afr handles EIO errors from the brick properly. -# -# The associated bug describes a problem where EIO errors returned from the -# local filesystem of a brick that is part of a replica volume are exposed to -# the user. This test simulates such failures and verifies that the volume -# operates as expected. -# -######## - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST mkdir -p $B0/test{1,2} - -# The graph is a two brick replica with error-gen enabled on the second brick -# and configured to return EIO lookup errors 100% of the time. This simulates -# a brick with a crashed or shut down local filesystem. Note that the order in -# which errors occur is a factor in reproducing the original bug (error-gen -# must be enabled in the second brick for this test to be effective). - -cat > $B0/test.vol </dev/null` - if [ $? 
-eq 0 ] - then - let j++ - let "BRICK${j}=$i" - - fi - let i++ - done - return $j -} - -function get_cached_brick() -{ - i=1 - while [ $i -lt 3 ] - do - test=`getfattr -n trusted.glusterfs.dht.linkto -e text $B0/${V0}$BRICK$i 2>&1` - if [ $? -eq 1 ] - then - cached=$BRICK"$i" - i=$(( $i+3 )) - fi - let i++ - done - - return $cached -} - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 -TEST $CLI volume start $V0 - -## Mount FUSE -TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0; - -## create a linkfile on subvolume 0 -TEST touch $M0/1 -TEST mv $M0/1 $M0/2 - -file_has_linkfile 2 -has_link=$? -if [ $has_link -eq 2 ] -then - get_cached_brick - CACHED=$? - # Kill a brick process - kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}$CACHED.pid`; -fi - -## trigger a lookup -ls -l $M0/2 2>/dev/null - -## fail dd if file exists. - -dd if=/dev/zero of=$M0/2 bs=1 count=1 conv=excl 2>/dev/null -EXPECT "1" echo $? - -cleanup; diff --git a/tests/bugs/bug-895235.t b/tests/bugs/bug-895235.t deleted file mode 100644 index d02120c2796..00000000000 --- a/tests/bugs/bug-895235.t +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume set $V0 ensure-durability off -TEST $CLI volume set $V0 performance.write-behind off -TEST $CLI volume set $V0 cluster.eager-lock off -TEST $CLI volume start $V0 -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable - -TEST gluster volume profile $V0 start -TEST dd of=$M0/a if=/dev/zero bs=1024k count=1 oflag=append -finodelk_max_latency=$($CLI volume profile $V0 info | grep FINODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}") - -TEST [ -z $finodelk_max_latency ] - -cleanup; diff --git a/tests/bugs/bug-896431.t b/tests/bugs/bug-896431.t deleted file mode 100755 index f968e59c1b3..00000000000 --- a/tests/bugs/bug-896431.t +++ /dev/null @@ -1,124 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting cluster.subvols-per-directory as -5 -TEST ! $CLI volume set $V0 cluster.subvols-per-directory -5 -EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; -TEST ! $CLI volume set $V0 subvols-per-directory -5 -EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; - -## Setting cluster.subvols-per-directory as 0 -TEST ! $CLI volume set $V0 cluster.subvols-per-directory 0 -EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; -TEST ! $CLI volume set $V0 subvols-per-directory 0 -EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; - -## Setting cluster.subvols-per-directory as 4 (the total number of bricks) -TEST ! $CLI volume set $V0 cluster.subvols-per-directory 4 -EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; -TEST ! 
$CLI volume set $V0 subvols-per-directory 4 -EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; - -## Setting cluster.subvols-per-directory as 2 (the total number of subvolumes) -TEST $CLI volume set $V0 cluster.subvols-per-directory 2 -EXPECT '2' volinfo_field $V0 'cluster.subvols-per-directory'; - -## Setting cluster.subvols-per-directory as 1 -TEST $CLI volume set $V0 subvols-per-directory 1 -EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory'; - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; - -## Start and create a pure replicate volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 replica 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT 'Replicate' volinfo_field $V0 'Type'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting cluster.subvols-per-directory as 8 for a replicate volume -TEST ! $CLI volume set $V0 cluster.subvols-per-directory 8 -EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; -TEST ! $CLI volume set $V0 subvols-per-directory 8 -EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; - -## Setting cluster.subvols-per-directory as 1 for a replicate volume -TEST $CLI volume set $V0 cluster.subvols-per-directory 1 -EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory'; -TEST $CLI volume set $V0 subvols-per-directory 1 -EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory'; - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; - -## Start and create a pure stripe volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 stripe 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; -EXPECT 'Stripe' volinfo_field $V0 'Type'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting cluster.subvols-per-directory as 8 for a stripe volume -TEST ! $CLI volume set $V0 cluster.subvols-per-directory 8 -EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; -TEST ! $CLI volume set $V0 subvols-per-directory 8 -EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; - -## Setting cluster.subvols-per-directory as 1 for a stripe volume -TEST $CLI volume set $V0 cluster.subvols-per-directory 1 -EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory'; -TEST $CLI volume set $V0 subvols-per-directory 1 -EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory'; - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-902610.t b/tests/bugs/bug-902610.t deleted file mode 100755 index 8b1e91fb9cb..00000000000 --- a/tests/bugs/bug-902610.t +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -## Layout-spread set to 3, but subvols up are 2. 
So layout should split 50-50 -function get_layout() -{ - layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1|grep dht |cut -d = -f2` - layout1_s=$(echo $layout1 | cut -c 19-26) - layout1_e=$(echo $layout1 | cut -c 27-34) - #echo "layout1 from $layout1_s to $layout1_e" > /dev/tty - layout2=`getfattr -n trusted.glusterfs.dht -e hex $2 2>&1|grep dht |cut -d = -f2` - layout2_s=$(echo $layout2 | cut -c 19-26) - layout2_e=$(echo $layout2 | cut -c 27-34) - #echo "layout2 from $layout2_s to $layout2_e" > /dev/tty - - if [ x"$layout2_s" = x"00000000" ]; then - # Reverse so we only have the real logic in one place. - tmp_s=$layout1_s - tmp_e=$layout1_e - layout1_s=$layout2_s - layout1_e=$layout2_e - layout2_s=$tmp_s - layout2_e=$tmp_e - fi - - # Figure out where the join point is. - target=$( $PYTHON -c "print '%08x' % (0x$layout1_e + 1)") - #echo "target for layout2 = $target" > /dev/tty - - # The second layout should cover everything that the first doesn't. - if [ x"$layout2_s" = x"$target" -a x"$layout2_e" = x"ffffffff" ]; then - return 0 - fi - - return 1 -} - -BRICK_COUNT=4 - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3 -## set subvols-per-dir option -TEST $CLI volume set $V0 subvols-per-directory 3 -TEST $CLI volume start $V0 - -## Mount FUSE -TEST glusterfs -s $H0 --volfile-id $V0 $M0 --entry-timeout=0 --attribute-timeout=0; - -TEST ls -l $M0 - -## kill 2 bricks to bring down available subvol < spread count -kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}2.pid`; -kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}3.pid`; - -mkdir $M0/dir1 2>/dev/null - -get_layout $B0/${V0}0/dir1 $B0/${V0}1/dir1 -EXPECT "0" echo $? - -cleanup; diff --git a/tests/bugs/bug-903336.t b/tests/bugs/bug-903336.t deleted file mode 100644 index c1f91312ae3..00000000000 --- a/tests/bugs/bug-903336.t +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0} -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -TEST setfattr -n trusted.io-stats-dump -v /tmp $M0 -cleanup diff --git a/tests/bugs/bug-904065.t b/tests/bugs/bug-904065.t deleted file mode 100755 index 0b39adb5c68..00000000000 --- a/tests/bugs/bug-904065.t +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash -# -# This test does not use 'showmount' from the nfs-utils package, it would -# require setting up a portmapper (either rpcbind or portmap, depending on the -# Linux distribution used for testing). The persistancy of the rmtab should not -# affect the current showmount outputs, so existing regression tests should be -# sufficient. -# - -# count the lines of a file, return 0 if the file does not exist -function count_lines() -{ - if [ -e "$1" ] - then - wc -l < $1 - else - echo 0 - fi -} - - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc -. 
$(dirname $0)/../volume.rc - -cleanup - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/brick1 -EXPECT 'Created' volinfo_field $V0 'Status' - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status' - -# glusterfs/nfs needs some time to start up in the background -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -# before mounting the rmtab should be empty -EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab - -TEST mount_nfs $H0:/$V0 $N0 nolock -# the output would looks similar to: -# -# hostname-0=172.31.122.104 -# mountpoint-0=/ufo -# -EXPECT '2' count_lines $GLUSTERD_WORKDIR/nfs/rmtab - -# duplicate mounts should not be recorded (client could have crashed) -TEST mount_nfs $H0:/$V0 $N1 nolock -EXPECT '2' count_lines $GLUSTERD_WORKDIR/nfs/rmtab - -# removing a mount should (even if there are two) should remove the entry -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N1 -EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab - -# unmounting the other mount should work flawlessly -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 -EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab - -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $M0 - -# we'll create a fake rmtab here, similar to how an other storage server would do -# using an invalid IP address to prevent (unlikely) collisions on the test-machine -cat << EOF > $M0/rmtab -hostname-0=127.0.0.256 -mountpoint-0=/ufo -EOF -EXPECT '2' count_lines $M0/rmtab - -# reconfigure merges the rmtab with the one on the volume -TEST gluster volume set $V0 nfs.mount-rmtab $M0/rmtab - -# glusterfs/nfs needs some time to restart -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -# a new mount should be added to the rmtab, not overwrite exiting ones -TEST mount_nfs $H0:/$V0 $N0 nolock -EXPECT '4' count_lines $M0/rmtab - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 -EXPECT '2' count_lines $M0/rmtab - -# TODO: nfs/reconfigure() is never called and is therefor disabled. When the -# NFS-server supports reloading and does not get restarted anymore, we should -# add a test that includes the merging of entries in the old rmtab with the new -# rmtab. - -cleanup diff --git a/tests/bugs/bug-904300.t b/tests/bugs/bug-904300.t deleted file mode 100755 index 648838bbf7a..00000000000 --- a/tests/bugs/bug-904300.t +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc -. $(dirname $0)/../volume.rc - -cleanup; - -# 1-8 -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/$V0; -TEST $CLI volume start $V0 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -TEST mount_nfs $H0:/$V0 $N0 nolock -TEST mkdir $N0/dir1 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -# -# Case 1: Allow "dir1" to be mounted only from 127.0.0.1 -# 9-12 -TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.1)"\" -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available - -TEST mount_nfs localhost:/$V0/dir1 $N0 nolock -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -# -# Case 2: Allow "dir1" to be mounted only from 8.8.8.8. This is -# a negative test case therefore the mount should fail. -# 13-16 -TEST $CLI volume set $V0 export-dir \""/dir1(8.8.8.8)"\" -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available - -TEST ! mount_nfs $H0:/$V0/dir1 $N0 nolock -TEST ! umount $N0 - - -# Case 3: Variation of test case1. Here we are checking with hostname -# instead of ip address. 
-# 17-20 -TEST $CLI volume set $V0 export-dir \""/dir1($H0)"\" -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available - -TEST mount_nfs $H0:/$V0/dir1 $N0 nolock -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -# Case 4: Variation of test case1. Here we are checking with IP range -# 21-24 -TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.0/24)"\" -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available - -TEST mount_nfs localhost:/$V0/dir1 $N0 nolock -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -## Finish up -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-905307.t b/tests/bugs/bug-905307.t deleted file mode 100644 index d81d81c9fa3..00000000000 --- a/tests/bugs/bug-905307.t +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; -TEST glusterd -TEST pidof glusterd - -#test functionality of post-op-delay-secs -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} - -#Strings should not be accepted. -TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc - -#-ve ints should not be accepted. -TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1 - -#INT_MAX+1 should not be accepted. -TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 2147483648 - -#floats should not be accepted. -TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25 - -#min val 0 should be accepted -TEST $CLI volume set $V0 cluster.post-op-delay-secs 0 -EXPECT "0" volume_option $V0 cluster.post-op-delay-secs - -#max val 2147483647 should be accepted -TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647 -EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs - -#some middle val in range 2147 should be accepted -TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147 -EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs -cleanup; diff --git a/tests/bugs/bug-905864.c b/tests/bugs/bug-905864.c deleted file mode 100644 index ed09b6e2bc4..00000000000 --- a/tests/bugs/bug-905864.c +++ /dev/null @@ -1,82 +0,0 @@ -#include -#include -#include -#include -#include - - -pthread_t th[5] = {0}; -void -flock_init (struct flock *f, short int type, off_t start, off_t len) -{ - f->l_type = type; - f->l_start = start; - f->l_len = len; -} - -int -flock_range_in_steps (int fd, int is_set, short l_type, - int start, int end, int step) -{ - int ret = 0; - int i = 0; - struct flock f = {0,}; - - for (i = start; i+step < end; i += step) { - flock_init (&f, l_type, i, step); - ret = fcntl (fd, (is_set)? 
F_SETLKW:F_GETLK, &f); - if (ret) { - perror ("fcntl"); - goto out; - } - } -out: - return ret; -} - -void * -random_locker (void *arg) -{ - int fd = *(int *)arg; - int i = 0; - int is_set = 0; - - /* use thread id to choose GETLK or SETLK operation*/ - is_set = pthread_self () % 2; - (void)flock_range_in_steps (fd, is_set, F_WRLCK, 0, 400, 1); - - return NULL; -} - - -int main (int argc, char **argv) -{ - int fd = -1; - int ret = 1; - int i = 0; - char *fname = NULL; - - if (argc < 2) - goto out; - - fname = argv[1]; - fd = open (fname, O_RDWR); - if (fd == -1) { - perror ("open"); - goto out; - } - - ret = flock_range_in_steps (fd, 1, F_WRLCK, 0, 2000, 2); - for (i = 0; i < 5; i++) { - pthread_create (&th[i], NULL, random_locker, (void *) &fd); - } - ret = flock_range_in_steps (fd, 1, F_WRLCK, 0, 2000, 2); - for (i = 0; i < 5; i++) { - pthread_join (th[i], NULL); - } -out: - if (fd != -1) - close (fd); - - return ret; -} diff --git a/tests/bugs/bug-905864.t b/tests/bugs/bug-905864.t deleted file mode 100644 index 39598d6e92b..00000000000 --- a/tests/bugs/bug-905864.t +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; -TEST $CLI volume start $V0; - -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M1; - -TEST touch $M0/file1; - -#following C program tries open up race(s) if any, in F_GETLK/F_SETLKW codepaths -#of locks xlator -TEST $CC -pthread -g3 $(dirname $0)/bug-905864.c -o $(dirname $0)/bug-905864 - -$(dirname $0)/bug-905864 $M0/file1 & -$(dirname $0)/bug-905864 $M1/file1; -wait - -TEST rm -f $(dirname $0)/bug-905864 -EXPECT $(brick_count $V0) online_brick_count - -cleanup diff --git a/tests/bugs/bug-906646.t b/tests/bugs/bug-906646.t deleted file mode 100644 index b11bb08bdd5..00000000000 --- a/tests/bugs/bug-906646.t +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -REPLICA=2 - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 $H0:$B0/${V0}-10 $H0:$B0/${V0}-11 -TEST $CLI volume start $V0 - -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume set $V0 cluster.background-self-heal-count 0 - -## Mount FUSE with caching disabled -TEST $GFS -s $H0 --volfile-id $V0 $M0; - -function xattr_query_check() -{ - local path=$1 - local xa_name=$2 - - local ret=$(getfattr -n $xa_name $path 2>&1 | grep -o "$xa_name: No such attribute" | wc -l) - echo $ret -} - -function set_xattr() -{ - local path=$1 - local xa_name=$2 - local xa_val=$3 - - setfattr -n $xa_name -v $xa_val $path - echo $? -} - -function remove_xattr() -{ - local path=$1 - local xa_name=$2 - - setfattr -x $xa_name $path - echo $? 
-} - -f=f00f -pth=$M0/$f - -TEST touch $pth - -# fetch backend paths -backend_paths=`get_backend_paths $pth` - -# convert it into and array -backend_paths_array=($backend_paths) - -# setxattr xattr for this file -EXPECT 0 set_xattr $pth "trusted.name" "test" - -# confirm the set on backend -EXPECT 0 xattr_query_check ${backend_paths_array[0]} "trusted.name" -EXPECT 0 xattr_query_check ${backend_paths_array[1]} "trusted.name" - -brick_path=`echo ${backend_paths_array[0]} | sed -n 's/\(.*\)\/'$f'/\1/p'` -brick_id=`$CLI volume info $V0 | grep "Brick[[:digit:]]" | grep -n $brick_path | cut -f1 -d:` - -# Kill a brick process -TEST kill_brick $V0 $H0 $brick_path - -# remove the xattr from the mount point -EXPECT 0 remove_xattr $pth "trusted.name" - -# we killed ${backend_paths[0]} - so expect the xattr to be there -# on the backend there -EXPECT 0 xattr_query_check ${backend_paths_array[0]} "trusted.name" -EXPECT 1 xattr_query_check ${backend_paths_array[1]} "trusted.name" - -# restart the brick process -TEST $CLI volume start $V0 force - -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 `expr $brick_id - 1` - -cat $pth >/dev/null - -# check backends - xattr should not be present anywhere -EXPECT 1 xattr_query_check ${backend_paths_array[0]} "trusted.name" -EXPECT 1 xattr_query_check ${backend_paths_array[1]} "trusted.name" - -cleanup; diff --git a/tests/bugs/bug-907072.t b/tests/bugs/bug-907072.t deleted file mode 100755 index a04f4c2498f..00000000000 --- a/tests/bugs/bug-907072.t +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../fileio.rc -. $(dirname $0)/../dht.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd; -TEST pidof glusterd; - -TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3}; -TEST $CLI volume start $V0; - -TEST glusterfs -s $H0 --volfile-id $V0 $M0; - -TEST mkdir $M0/test; - -OLD_LAYOUT0=`get_layout $B0/${V0}0/test`; -OLD_LAYOUT1=`get_layout $B0/${V0}1/test`; -OLD_LAYOUT2=`get_layout $B0/${V0}2/test`; -OLD_LAYOUT3=`get_layout $B0/${V0}3/test`; - -TEST killall glusterfsd; - -# Delete directory on one brick -TEST rm -rf $B0/${V}1/test; - -# And only layout xattr on another brick -TEST setfattr -x trusted.glusterfs.dht $B0/${V0}2/test; - -TEST $CLI volume start $V0 force; - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST glusterfs -s $H0 --volfile-id $V0 $M0; -TEST stat $M0/test; - -NEW_LAYOUT0=`get_layout $B0/${V0}0/test`; -NEW_LAYOUT1=`get_layout $B0/${V0}1/test`; -NEW_LAYOUT2=`get_layout $B0/${V0}2/test`; -NEW_LAYOUT3=`get_layout $B0/${V0}3/test`; - -EXPECT $OLD_LAYOUT0 echo $NEW_LAYOUT0; -EXPECT $OLD_LAYOUT1 echo $NEW_LAYOUT1; -EXPECT $OLD_LAYOUT2 echo $NEW_LAYOUT2; -EXPECT $OLD_LAYOUT3 echo $NEW_LAYOUT3; diff --git a/tests/bugs/bug-908146.t b/tests/bugs/bug-908146.t deleted file mode 100755 index 87b456e6e22..00000000000 --- a/tests/bugs/bug-908146.t +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -function get_fd_count { - local vol=$1 - local host=$2 - local brick=$3 - local fname=$4 - local gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brick/$fname)) - local statedump=$(generate_brick_statedump $vol $host $brick) - local count=$(grep "gfid=$gfid_str" $statedump -A2 | grep fd-count | cut -f2 -d'=' | tail -1) - rm -f $statedump - echo $count -} -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0}0 -TEST $CLI volume set $V0 performance.open-behind off -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable - -TEST touch $M0/a - -exec 4>"$M0/a" -exec 5>"$M1/a" -EXPECT "2" get_fd_count $V0 $H0 $B0/${V0}0 a - -exec 4>&- -EXPECT "1" get_fd_count $V0 $H0 $B0/${V0}0 a - -exec 5>&- -EXPECT "0" get_fd_count $V0 $H0 $B0/${V0}0 a - -cleanup diff --git a/tests/bugs/bug-912297.t b/tests/bugs/bug-912297.t deleted file mode 100755 index f5a5babf5f3..00000000000 --- a/tests/bugs/bug-912297.t +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; - -## Verify volume is is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -## Setting owner-uid as -12 -TEST ! $CLI volume set $V0 owner-uid -12 -EXPECT '' volinfo_field $V0 'storage.owner-uid' - -## Setting owner-gid as -5 -TEST ! $CLI volume set $V0 owner-gid -5 -EXPECT '' volinfo_field $V0 'storage.owner-gid' - -## Setting owner-uid as 36 -TEST $CLI volume set $V0 owner-uid 36 -EXPECT '36' volinfo_field $V0 'storage.owner-uid' - -## Setting owner-gid as 36 -TEST $CLI volume set $V0 owner-gid 36 -EXPECT '36' volinfo_field $V0 'storage.owner-gid' - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-912564.t b/tests/bugs/bug-912564.t deleted file mode 100755 index 4fc548c695c..00000000000 --- a/tests/bugs/bug-912564.t +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash - -# Test that the rsync and "extra" regexes cause rename-in-place without -# creating linkfiles, when they're supposed to. Without the regex we'd have a -# 1/4 chance of each file being assigned to the right place, so with 16 files -# we have a 1/2^32 chance of getting the correct result by accident. - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -function count_linkfiles { - local i - local count=0 - for i in $(seq $2 $3); do - x=$(find $1$i -perm -1000 | wc -l) - # Divide by two because of the .glusterfs links. - count=$((count+x/2)) - done - echo $count -} - -# This function only exists to get around quoting difficulties in TEST. -function set_regex { - $CLI volume set $1 cluster.extra-hash-regex '^foo(.+)bar$' -} - -cleanup; - -TEST glusterd -TEST pidof glusterd - -mkdir -p $H0:$B0/${V0}0 -mkdir -p $H0:$B0/${V0}1 -mkdir -p $H0:$B0/${V0}2 -mkdir -p $H0:$B0/${V0}3 - -# Create and start a volume. 
-TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \ - $H0:$B0/${V0}2 $H0:$B0/${V0}3 -TEST $CLI volume start $V0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; - -# Mount it. -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 - -# Make sure the rsync regex works, by verifying that no linkfiles are -# created. -rm -f $M0/file* -for i in $(seq 0 15); do - fn=$(printf file%x $i) - tmp_fn=$(printf .%s.%d $fn $RANDOM) - echo testing > $M0/$tmp_fn - mv $M0/$tmp_fn $M0/$fn -done -lf=$(count_linkfiles $B0/$V0 0 3) -TEST [ "$lf" -eq "0" ] - -# Make sure that linkfiles *are* created for normal files. -rm -f $M0/file* -for i in $(seq 0 15); do - fn=$(printf file%x $i) - tmp_fn=$(printf foo%sbar $fn) - echo testing > $M0/$tmp_fn - mv $M0/$tmp_fn $M0/$fn -done -lf=$(count_linkfiles $B0/$V0 0 3) -TEST [ "$lf" -ne "0" ] - -# Make sure that setting an extra regex suppresses the linkfiles. -TEST set_regex $V0 -rm -f $M0/file* -for i in $(seq 0 15); do - fn=$(printf file%x $i) - tmp_fn=$(printf foo%sbar $fn) - echo testing > $M0/$tmp_fn - mv $M0/$tmp_fn $M0/$fn -done -lf=$(count_linkfiles $B0/$V0 0 3) -TEST [ "$lf" -eq "0" ] - -# Re-test the rsync regex, to make sure the extra one didn't break it. -rm -f $M0/file* -for i in $(seq 0 15); do - fn=$(printf file%x $i) - tmp_fn=$(printf .%s.%d $fn $RANDOM) - echo testing > $M0/$tmp_fn - mv $M0/$tmp_fn $M0/$fn -done -lf=$(count_linkfiles $B0/$V0 0 3) -TEST [ "$lf" -eq "0" ] - -cleanup diff --git a/tests/bugs/bug-913051.t b/tests/bugs/bug-913051.t deleted file mode 100644 index 69845f7a554..00000000000 --- a/tests/bugs/bug-913051.t +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../fileio.rc - -cleanup; - -#Test that afr opens the file on the bricks that were offline at the time of -# open after the brick comes online. This tests for writev, readv triggering -# open-fd-fix in afr. 
-TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume set $V0 performance.quick-read off -TEST $CLI volume set $V0 performance.open-behind off -TEST $CLI volume set $V0 performance.io-cache off -TEST $CLI volume set $V0 performance.write-behind off -TEST $CLI volume set $V0 performance.stat-prefetch off -TEST $CLI volume set $V0 performance.read-ahead off -TEST $CLI volume set $V0 cluster.background-self-heal-count 0 -TEST $CLI volume start $V0 -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable -TEST kill_brick $V0 $H0 $B0/${V0}0 - -TEST mkdir $M0/dir -TEST touch $M0/dir/a -TEST touch $M0/dir/b -echo abc > $M0/dir/b - -TEST wfd=`fd_available` -TEST fd_open $wfd "w" $M0/dir/a -TEST rfd=`fd_available` -TEST fd_open $rfd "r" $M0/dir/b - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 - -#check that the files are not opned on brick-0 -TEST stat $M0/dir/a -realpatha=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/a") -EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpatha" -EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/a - -TEST stat $M0/dir/b -realpathb=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/b") -EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpathb" -EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/b - -#attempt self-heal so that the files are created on brick-0 - -TEST dd if=$M0/dir/a of=/dev/null bs=1024k -TEST dd if=$M0/dir/b of=/dev/null bs=1024k - -#trigger writev for attempting open-fd-fix in afr -TEST fd_write $wfd "open sesame" - -#trigger readv for attempting open-fd-fix in afr -TEST fd_cat $rfd - -EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpatha" -EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpathb" - -TEST fd_close $wfd -TEST fd_close $rfd -cleanup; diff --git a/tests/bugs/bug-913487.t b/tests/bugs/bug-913487.t deleted file mode 100644 index 2095903d9d2..00000000000 --- a/tests/bugs/bug-913487.t +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd; -TEST pidof glusterd; - -TEST ! $CLI volume set $V0 performance.open-behind off; - -TEST pidof glusterd; - -cleanup; diff --git a/tests/bugs/bug-913544.t b/tests/bugs/bug-913544.t deleted file mode 100644 index db28ca814ce..00000000000 --- a/tests/bugs/bug-913544.t +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -#simulate a split-brain of a file and do truncate. This should not crash the mount point -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 -TEST $CLI volume set $V0 stat-prefetch off -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -cd $M0 -TEST touch a -#simulate no-changelog data split-brain -echo "abc" > $B0/${V0}1/a -echo "abcd" > $B0/${V0}0/a -TEST truncate -s 0 a -TEST ls -cd - -cleanup diff --git a/tests/bugs/bug-913555.t b/tests/bugs/bug-913555.t deleted file mode 100755 index 2393a16ad6f..00000000000 --- a/tests/bugs/bug-913555.t +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -# Test that a volume becomes unwritable when the cluster loses quorum. - -. 
$(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../cluster.rc - - -function check_fs { - df $1 &> /dev/null - echo $? -} - -function check_peers { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} - -function glusterfsd_count { - pidof glusterfsd | wc -w; -} - -cleanup; - -TEST launch_cluster 3; # start 3-node virtual cluster -TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli -TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli - -EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers - -TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0 -TEST $CLI_1 volume set $V0 cluster.server-quorum-type server -TEST $CLI_1 volume start $V0 -TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0 - -# Kill one pseudo-node, make sure the others survive and volume stays up. -TEST kill_node 3; -EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; -EXPECT 0 check_fs $M0; -EXPECT 2 glusterfsd_count; - -# Kill another pseudo-node, make sure the last one dies and volume goes down. -TEST kill_node 2; -EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers -EXPECT 1 check_fs $M0; -EXPECT 0 glusterfsd_count; # the two glusterfsds of the other two glusterds - # must be dead - -TEST $glusterd_2; -TEST $glusterd_3; -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 glusterfsd_count; # restore quorum, all ok -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0; - -cleanup diff --git a/tests/bugs/bug-915280.t b/tests/bugs/bug-915280.t deleted file mode 100755 index 1a8b78f8cf3..00000000000 --- a/tests/bugs/bug-915280.t +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../nfs.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -function volinfo_field() -{ - local vol=$1; - local field=$2; - - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; -} - -TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2; -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -MOUNTDIR=$N0; -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 nolock,timeo=30,retrans=1 -TEST touch $N0/testfile - -TEST $CLI volume set $V0 debug.error-gen client -TEST $CLI volume set $V0 debug.error-fops stat -TEST $CLI volume set $V0 debug.error-failure 100 - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; - -pid_file=$(read_nfs_pidfile); - -getfacl $N0/testfile 2>/dev/null - -nfs_pid=$(get_nfs_pid); -if [ ! $nfs_pid ] -then - nfs_pid=0; -fi - -TEST [ $nfs_pid -eq $pid_file ] - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR - -cleanup; diff --git a/tests/bugs/bug-915554.t b/tests/bugs/bug-915554.t deleted file mode 100755 index b0c653ec5f7..00000000000 --- a/tests/bugs/bug-915554.t +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -# -# Bug <915554> -# -# This test checks for a condition where a rebalance migrates a file and does -# not preserve the original file size. This can occur due to hole preservation -# logic in the file migration code. If a file size is aligned to a disk sector -# boundary (512b) and the tail portion of the file is zero-filled, the file -# may end up truncated to the end of the last data region in the file. -# -### - -. $(dirname $0)/../include.rc -. $(dirname $0)/../dht.rc -. 
$(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -BRICK_COUNT=3 -# create, start and mount a two brick DHT volume -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 -TEST $CLI volume start $V0 - -TEST glusterfs --attribute-timeout=0 --entry-timeout=0 --gid-timeout=-1 -s $H0 --volfile-id $V0 $M0; - -i=1 -# Write some data to a file and extend such that the file is sparse to a sector -# aligned boundary. -echo test > $M0/$i -TEST truncate -s 1M $M0/$i - -# cache the original size -SIZE1=`stat -c %s $M0/$i` - -# rename till file gets a linkfile - -while [ $i -ne 0 ] -do - test=`mv $M0/$i $M0/$(( $i+1 )) 2>/dev/null` - if [ $? -ne 0 ] - then - echo "rename failed" - break - fi - let i++ - file_has_linkfile $i - has_link=$? - if [ $has_link -eq 2 ] - then - break; - fi -done - -# start a rebalance (force option to overide checks) to trigger migration of -# file - -TEST $CLI volume rebalance $V0 start force - -# check if rebalance has completed for upto 15 secs - -EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed - -# validate the file size after the migration -SIZE2=`stat -c %s $M0/$i` - -TEST [ $SIZE1 -eq $SIZE2 ] - -TEST rm -f $M0/$i -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 - -cleanup; diff --git a/tests/bugs/bug-916226.t b/tests/bugs/bug-916226.t deleted file mode 100644 index 50d1e312012..00000000000 --- a/tests/bugs/bug-916226.t +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3 -TEST $CLI volume set $V0 cluster.eager-lock on -TEST $CLI volume start $V0 - -## Mount FUSE -TEST glusterfs -s $H0 --volfile-id $V0 $M0; - -TEST mkdir $M0/dir{1..10}; -TEST touch $M0/dir{1..10}/files{1..10}; - -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:/$B0/${V0}5 - -TEST $CLI volume rebalance $V0 start force -EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 - -cleanup; diff --git a/tests/bugs/bug-916549.t b/tests/bugs/bug-916549.t deleted file mode 100755 index 344c6abaaf1..00000000000 --- a/tests/bugs/bug-916549.t +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd; -TEST $CLI volume create $V0 $H0:$B0/${V0}1; -TEST $CLI volume start $V0; - -pid_file=$(ls $GLUSTERD_WORKDIR/vols/$V0/run); -brick_pid=$(cat $GLUSTERD_WORKDIR/vols/$V0/run/$pid_file); - - -kill -SIGKILL $brick_pid; -TEST $CLI volume start $V0 force; -TEST process_leak_count $(pidof glusterd); - -cleanup; diff --git a/tests/bugs/bug-918437-sh-mtime.t b/tests/bugs/bug-918437-sh-mtime.t deleted file mode 100644 index 9d3ebd57508..00000000000 --- a/tests/bugs/bug-918437-sh-mtime.t +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -function get_mtime { - local f=$1 - stat $f | grep Modify | awk '{print $2 $3}' | cut -f1 -d'.' -} - -function file_exists { - if [ -f $1 ]; then echo "Y"; else echo "N"; fi -} -cleanup; - -## Tests if mtime is correct after self-heal. 
-TEST glusterd -TEST pidof glusterd -TEST mkdir -p $B0/gfs0/brick0{1,2} -TEST $CLI volume create $V0 replica 2 transport tcp $H0:$B0/gfs0/brick01 $H0:$B0/gfs0/brick02 -TEST $CLI volume set $V0 nfs.disable on -TEST $CLI volume set $V0 performance.stat-prefetch off -TEST $CLI volume set $V0 cluster.background-self-heal-count 0 -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --direct-io-mode=enable -# file 'a' is healed from brick02 to brick01 where as file 'b' is healed from -# brick01 to brick02 - -TEST cp -p /etc/passwd $M0/a -TEST cp -p /etc/passwd $M0/b - -#Store mtimes before self-heals -TEST modify_atstamp=$(get_mtime $B0/gfs0/brick02/a) -TEST modify_btstamp=$(get_mtime $B0/gfs0/brick02/b) - -TEST $CLI volume stop $V0 -TEST gf_rm_file_and_gfid_link $B0/gfs0/brick01 a -TEST gf_rm_file_and_gfid_link $B0/gfs0/brick02 b - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 - -TEST $CLI volume set $V0 cluster.self-heal-daemon on -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 - -#TODO remove these 2 lines once heal-full is fixed in v2. -TEST stat $M0/a -TEST stat $M0/b - -TEST gluster volume heal $V0 full -EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick01/a -EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick02/b -EXPECT_WITHIN $HEAL_TIMEOUT 0 afr_get_pending_heal_count $V0 - -size=`stat -c '%s' /etc/passwd` -EXPECT $size stat -c '%s' $B0/gfs0/brick01/a - -TEST modify_atstamp1=$(get_mtime $B0/gfs0/brick01/a) -TEST modify_atstamp2=$(get_mtime $B0/gfs0/brick02/a) -EXPECT $modify_atstamp echo $modify_atstamp1 -EXPECT $modify_atstamp echo $modify_atstamp2 - -TEST modify_btstamp1=$(get_mtime $B0/gfs0/brick01/b) -TEST modify_btstamp2=$(get_mtime $B0/gfs0/brick02/b) -EXPECT $modify_btstamp echo $modify_btstamp1 -EXPECT $modify_btstamp echo $modify_btstamp2 -cleanup; diff --git a/tests/bugs/bug-921072.t b/tests/bugs/bug-921072.t deleted file mode 100755 index 8f7a5d05362..00000000000 --- a/tests/bugs/bug-921072.t +++ /dev/null @@ -1,124 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc -. $(dirname $0)/../volume.rc - -cleanup; - -#1 -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/$V0 -TEST $CLI volume start $V0 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available -TEST mount_nfs $H0:/$V0 $N0 nolock -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -# based on ip addresses (1-4) -# case 1: allow only localhost ip -TEST $CLI volume set $V0 nfs.rpc-auth-allow 127.0.0.1 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -TEST mount_nfs localhost:/$V0 $N0 nolock -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -# case 2: allow only non-localhost ip -TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.1.1 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available -#11 -TEST ! mount_nfs localhost:/$V0 $N0 nolock -TEST $CLI volume reset --mode=script $V0 -# case 3: reject only localhost ip -TEST $CLI volume set $V0 nfs.rpc-auth-reject 127.0.0.1 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -TEST ! 
mount_nfs localhost:/$V0 $N0 nolock - -# case 4: reject only non-localhost ip -TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.1.1 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -TEST mount_nfs localhost:/$V0 $N0 nolock -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - - - -# NEED TO CHECK BOTH IP AND NAME BASED AUTH. -# CASES WITH NFS.ADDR-NAMELOOKUP ON (5-12) -TEST $CLI volume reset --mode=script $V0 -TEST $CLI volume set $V0 nfs.addr-namelookup on -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available -#20 -TEST mount_nfs localhost:/$V0 $N0 nolock -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -# case 5: allow only localhost -TEST $CLI volume set $V0 nfs.rpc-auth-allow localhost -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -TEST mount_nfs localhost:/$V0 $N0 nolock -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -# case 6: allow only somehost -TEST $CLI volume set $V0 nfs.rpc-auth-allow somehost -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -TEST ! mount_nfs localhost:/$V0 $N0 nolock - -# case 7: reject only localhost -TEST $CLI volume reset --mode=script $V0 -TEST $CLI volume set $V0 nfs.addr-namelookup on -TEST $CLI volume set $V0 nfs.rpc-auth-reject localhost -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available -#30 -TEST ! mount_nfs localhost:/$V0 $N0 nolock - -# case 8: reject only somehost -TEST $CLI volume set $V0 nfs.rpc-auth-reject somehost -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -TEST mount_nfs localhost:/$V0 $N0 nolock -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -# based on ip addresses: repeat of cases 1-4 -# case 9: allow only localhost ip -TEST $CLI volume reset --mode=script $V0 -TEST $CLI volume set $V0 nfs.addr-namelookup on -TEST $CLI volume set $V0 nfs.rpc-auth-allow 127.0.0.1 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -TEST mount_nfs localhost:/$V0 $N0 nolock -TEST mkdir -p $N0/subdir -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -# case 10: allow a non-localhost ip -TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.1.1 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available -#41 -TEST ! mount_nfs localhost:/$V0 $N0 nolock - -# case 11: reject only localhost ip -TEST $CLI volume reset --mode=script $V0 -TEST $CLI volume set $V0 nfs.addr-namelookup on -TEST $CLI volume set $V0 nfs.rpc-auth-reject 127.0.0.1 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -TEST ! mount_nfs localhost:/$V0 $N0 nolock -TEST ! mount_nfs localhost:/$V0/subdir $N0 nolock - -# case 12: reject only non-localhost ip -TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.1.1 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available - -TEST mount_nfs localhost:/$V0 $N0 nolock -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -TEST mount_nfs localhost:/$V0/subdir $N0 nolock -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -TEST $CLI volume stop --mode=script $V0 -#52 -TEST $CLI volume delete --mode=script $V0 -cleanup diff --git a/tests/bugs/bug-921215.t b/tests/bugs/bug-921215.t deleted file mode 100755 index 344b568591c..00000000000 --- a/tests/bugs/bug-921215.t +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -# This is test case for bug no 921215 "Can not create volume with a . in the name" - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST ! 
$CLI volume create $V0.temp replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 - -cleanup; diff --git a/tests/bugs/bug-921231.t b/tests/bugs/bug-921231.t deleted file mode 100644 index 79c7522a24a..00000000000 --- a/tests/bugs/bug-921231.t +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -# This test writes to same file with 2 fds and tests that eager-lock is not -# causing extra delay because of post-op-delay-secs -cleanup; - -function write_to_file { - dd of=$M0/1 if=/dev/zero bs=1024k count=128 oflag=append 2>&1 >/dev/null -} - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 -TEST $CLI volume set $V0 eager-lock on -TEST $CLI volume set $V0 post-op-delay-secs 3 -TEST $CLI volume set $V0 client-log-level DEBUG -TEST $CLI volume start $V0 -TEST $CLI volume profile $V0 start -TEST $CLI volume set $V0 ensure-durability off -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -write_to_file & -write_to_file & -wait -#Test if the MAX [F]INODELK fop latency is of the order of seconds. -inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}") -TEST [ -z $inodelk_max_latency ] - -cleanup; diff --git a/tests/bugs/bug-921408.t b/tests/bugs/bug-921408.t deleted file mode 100755 index 6490a93c8ef..00000000000 --- a/tests/bugs/bug-921408.t +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../fileio.rc -. $(dirname $0)/../dht.rc -. $(dirname $0)/../volume.rc - -cleanup; -wait_check_status () -{ - n=0 - while [ $n -lt $1 ] - do - ret=$(rebalance_completed) - if [ $ret == "0" ] - then - return 0; - else - sleep 1 - n=`expr $n + 1`; - fi - done - return 1; -} - -addbr_rebal_till_layout_change() -{ - val=1 - l=$1 - i=1 - while [ $i -lt 5 ] - do - $CLI volume add-brick $V0 $H0:$B0/${V0}$l &>/dev/null - $CLI volume rebalance $V0 fix-layout start &>/dev/null - wait_check_status $REBALANCE_TIMEOUT - if [ $? -eq 1 ] - then - break - fi - NEW_LAYOUT=`get_layout $B0/${V0}0` - if [ $OLD_LAYOUT == $NEW_LAYOUT ] - then - i=`expr $i + 1`; - l=`expr $l + 1`; - else - val=0 - break - fi - done - return $val -} -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 -TEST $CLI volume set $V0 subvols-per-directory 1 -TEST $CLI volume start $V0 - -TEST glusterfs -s $H0 --volfile-id $V0 $M0; - -TEST mkdir $M0/test -TEST touch $M0/test/test - -fd=`fd_available` -TEST fd_open $fd "rw" $M0/test/test - -OLD_LAYOUT=`get_layout $B0/${V0}0` - -addbr_rebal_till_layout_change 1 - -TEST [ $? -eq 0 ] - -for i in $(seq 1 1000) -do - ls -l $M0/ >/dev/null - ret=$? - if [ $ret != 0 ] - then - break - fi -done - -TEST [ $ret == 0 ]; -TEST fd_close $fd; - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 - -cleanup diff --git a/tests/bugs/bug-924075.t b/tests/bugs/bug-924075.t deleted file mode 100755 index f4e03e33a96..00000000000 --- a/tests/bugs/bug-924075.t +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; - -#FIXME: there is another patch which moves the following function into -#include.rc -function process_leak_count () -{ - local pid=$1; - return $(ls -lh /proc/$pid/fd | grep "(deleted)" | wc -l) -} - -TEST glusterd; -TEST $CLI volume create $V0 $H0:$B0/${V0}1; -TEST $CLI volume start $V0; -TEST glusterfs -s $H0 --volfile-id $V0 $M0; -mount_pid=$(get_mount_process_pid $V0); -TEST process_leak_count $mount_pid; - -cleanup; diff --git a/tests/bugs/bug-924265.t b/tests/bugs/bug-924265.t deleted file mode 100755 index 51eda7f6e97..00000000000 --- a/tests/bugs/bug-924265.t +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# Test that setting cluster.dht-xattr-name works, and that DHT consistently -# uses the specified name instead of the default. - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -# We only care about the exit code, so keep it quiet. -function silent_getfattr { - getfattr $* &> /dev/null -} - -cleanup - -TEST glusterd -TEST pidof glusterd - -mkdir -p $H0:$B0/${V0}0 - -# Create a volume and set the option. -TEST $CLI volume create $V0 $H0:$B0/${V0}0 -TEST $CLI volume set $V0 cluster.dht-xattr-name trusted.foo.bar - -# Start and mount the volume. -TEST $CLI volume start $V0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 - -# Create a directory and make sure it has the right xattr. -mkdir $M0/test -TEST ! silent_getfattr -n trusted.glusterfs.dht $B0/${V0}0/test -TEST silent_getfattr -n trusted.foo.bar $B0/${V0}0/test - -cleanup diff --git a/tests/bugs/bug-924726.t b/tests/bugs/bug-924726.t deleted file mode 100755 index 82c11af878e..00000000000 --- a/tests/bugs/bug-924726.t +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -TESTS_EXPECTED_IN_LOOP=10 - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -function get_socket_count() { - netstat -nap | grep $1 | wc -l -} - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info; - -TEST $CLI volume create $V0 $H0:$B0/$V0 -EXPECT 'Created' volinfo_field $V0 'Status'; - -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST glusterfs -s $H0 --volfile-id $V0 $M0; - -TEST ls $M0 - -GLFS_MNT_PID=`ps ax | grep -i $M0 | grep glusterfs | grep -v grep | sed -e "s/^ *\([0-9]*\).*/\1/g"` - -SOCKETS_BEFORE_SWITCH=`netstat -nap | grep $GLFS_MNT_PID | grep ESTABLISHED | wc -l` - -for i in $(seq 1 5); do - TEST_IN_LOOP $CLI volume set $V0 performance.write-behind off; - sleep 1; - TEST_IN_LOOP $CLI volume set $V0 performance.write-behind on; - sleep 1; -done - -SOCKETS_AFTER_SWITCH=`netstat -nap | grep $GLFS_MNT_PID | grep ESTABLISHED | wc -l` - -# currently active graph is not cleaned up till some operation on -# mount-point. Hence there is one extra graph. -TEST [ $SOCKETS_AFTER_SWITCH = `expr $SOCKETS_BEFORE_SWITCH + 1` ] - -cleanup; diff --git a/tests/bugs/bug-927616.t b/tests/bugs/bug-927616.t deleted file mode 100755 index 4525ddbb747..00000000000 --- a/tests/bugs/bug-927616.t +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../nfs.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; -TEST $CLI volume set $V0 performance.open-behind off; -TEST $CLI volume start $V0 - -## Mount FUSE with caching disabled -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; - -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 nolock; - -TEST mkdir $M0/dir; - -mkdir $M0/other; -cp /etc/passwd $M0/; -cp $M0/passwd $M0/file; -chmod 600 $M0/file; - -chown -R nfsnobody:nfsnobody $M0/dir; - -TEST $CLI volume set $V0 server.root-squash on; - -sleep 1; - -# tests should fail. -touch $M0/foo 2>/dev/null; -TEST [ $? -ne 0 ] -touch $N0/foo 2>/dev/null; -TEST [ $? -ne 0 ] -mkdir $M0/new 2>/dev/null; -TEST [ $? -ne 0 ] -mkdir $N0/new 2>/dev/null; -TEST [ $? -ne 0 ] - -TEST $CLI volume set $V0 server.root-squash off; - -sleep 1; - -# tests should pass. -touch $M0/foo 2>/dev/null; -TEST [ $? -eq 0 ] -touch $N0/bar 2>/dev/null; -TEST [ $? -eq 0 ] -mkdir $M0/new 2>/dev/null; -TEST [ $? -eq 0 ] -mkdir $N0/old 2>/dev/null; -TEST [ $? -eq 0 ] - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-948686.t b/tests/bugs/bug-948686.t deleted file mode 100755 index 040287024e8..00000000000 --- a/tests/bugs/bug-948686.t +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../cluster.rc - -function check_peers { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} -cleanup; -#setup cluster and test volume -TEST launch_cluster 3; # start 3-node virtual cluster -TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli -TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli - -EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers; - -TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0 -TEST $CLI_1 volume start $V0 -TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0 - -#kill a node -TEST kill_node 3 - -#modify volume config to see change in volume-sync -TEST $CLI_1 volume set $V0 write-behind off -#add some files to the volume to see effect of volume-heal cmd -TEST touch $M0/{1..100}; -TEST $CLI_1 volume stop $V0; -TEST $glusterd_3; -EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers; -TEST $CLI_3 volume start $V0; -TEST $CLI_2 volume stop $V0; -TEST $CLI_2 volume delete $V0; - -cleanup; - -TEST glusterd; -TEST $CLI volume create $V0 $H0:$B0/$V0 -TEST $CLI volume start $V0 -pkill glusterd; -pkill glusterfsd; -TEST glusterd -TEST $CLI volume status $V0 - -cleanup; diff --git a/tests/bugs/bug-948729/bug-948729-force.t b/tests/bugs/bug-948729/bug-948729-force.t deleted file mode 100644 index b4106a58736..00000000000 --- a/tests/bugs/bug-948729/bug-948729-force.t +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../../include.rc -. $(dirname $0)/../../volume.rc -. 
$(dirname $0)/../../cluster.rc - -function check_peers { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} - -cleanup; -uuid1=`uuidgen`; -uuid2=`uuidgen`; -uuid3=`uuidgen`; - -V1=patchy1 -V2=patchy2 - -TEST launch_cluster 2; - -TEST $CLI_1 peer probe $H2; - -EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; - -B3=/d/backends/3 -B4=/d/backends/4 -B5=/d/backends/5 -B6=/d/backends/6 - -mkdir -p $B3 $B4 $B5 $B6 - -TEST truncate -s 16M $B1/brick1 -TEST truncate -s 16M $B2/brick2 -TEST truncate -s 16M $B3/brick3 -TEST truncate -s 16M $B4/brick4 -TEST truncate -s 16M $B5/brick5 -TEST truncate -s 16M $B6/brick6 - -TEST LD1=`SETUP_LOOP $B1/brick1` -TEST MKFS_LOOP $LD1 -TEST LD2=`SETUP_LOOP $B2/brick2` -TEST MKFS_LOOP $LD2 -TEST LD3=`SETUP_LOOP $B3/brick3` -TEST MKFS_LOOP $LD3 -TEST LD4=`SETUP_LOOP $B4/brick4` -TEST MKFS_LOOP $LD4 -TEST LD5=`SETUP_LOOP $B5/brick5` -TEST MKFS_LOOP $LD5 -TEST LD6=`SETUP_LOOP $B6/brick6` -TEST MKFS_LOOP $LD6 - -mkdir -p $B1/$V0 $B2/$V0 $B3/$V0 $B4/$V0 $B5/$V0 $B6/$V0 - -TEST MOUNT_LOOP $LD1 $B1/$V0 -TEST MOUNT_LOOP $LD2 $B2/$V0 -TEST MOUNT_LOOP $LD3 $B3/$V0 -TEST MOUNT_LOOP $LD4 $B4/$V0 -TEST MOUNT_LOOP $LD5 $B5/$V0 -TEST MOUNT_LOOP $LD6 $B6/$V0 - -#Case 0: Parent directory of the brick is absent -TEST ! $CLI1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 force - -#Case 1: File system root is being used as brick directory -TEST $CLI1 volume create $V0 $H1:$B5/$V0 $H2:$B6/$V0 force - -#Case 2: Brick directory contains only one component -TEST $CLI1 volume create $V1 $H1:/$uuid1 $H2:/$uuid2 force - -#Case 3: Sub-directories of the backend FS being used as brick directory -TEST $CLI1 volume create $V2 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 force - -#add-brick tests -TEST ! $CLI1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3 force -TEST $CLI1 volume add-brick $V0 $H1:$B3/$V0 force -TEST $CLI1 volume add-brick $V1 $H1:/$uuid3 force -TEST $CLI1 volume add-brick $V2 $H1:$B4/$V0/brick3 force - -#####replace-brick tests -#FIX-ME: replace-brick does not work with the newly introduced cluster test -#####framework - -rmdir /$uuid1 /$uuid2 /$uuid3; - -$CLI volume stop $V0 -$CLI volume stop $V1 -$CLI volume stop $V2 - -UMOUNT_LOOP $B1/$V0 -UMOUNT_LOOP $B2/$V0 -UMOUNT_LOOP $B3/$V0 -UMOUNT_LOOP $B4/$V0 -UMOUNT_LOOP $B5/$V0 -UMOUNT_LOOP $B6/$V0 - -rm -f $B1/brick1 -rm -f $B2/brick2 -rm -f $B3/brick3 -rm -f $B4/brick4 -rm -f $B5/brick5 -rm -f $B6/brick6 - -cleanup; diff --git a/tests/bugs/bug-948729/bug-948729-mode-script.t b/tests/bugs/bug-948729/bug-948729-mode-script.t deleted file mode 100644 index 6264e18f1bd..00000000000 --- a/tests/bugs/bug-948729/bug-948729-mode-script.t +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../../include.rc -. $(dirname $0)/../../volume.rc -. 
$(dirname $0)/../../cluster.rc - -function check_peers { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} - -cleanup; - -uuid1=`uuidgen`; -uuid2=`uuidgen`; -uuid3=`uuidgen`; - -TEST launch_cluster 2; - -TEST $CLI_1 peer probe $H2; - -EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; - -B3=/d/backends/3 -mkdir -p $B3 - -TEST truncate -s 16M $B1/brick1 -TEST truncate -s 16M $B2/brick2 -TEST truncate -s 16M $B3/brick3 - -TEST LD1=`SETUP_LOOP $B1/brick1` -TEST MKFS_LOOP $LD1 -TEST LD2=`SETUP_LOOP $B2/brick2` -TEST MKFS_LOOP $LD2 -TEST LD3=`SETUP_LOOP $B3/brick3` -TEST MKFS_LOOP $LD3 - -mkdir -p $B1/$V0 $B2/$V0 $B3/$V0 - -TEST MOUNT_LOOP $LD1 $B1/$V0 -TEST MOUNT_LOOP $LD2 $B2/$V0 -TEST MOUNT_LOOP $LD3 $B3/$V0 - -cli1=$(echo $CLI1 | sed 's/ --wignore//') - -#Case 0: Parent directory of the brick is absent -TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 - -#Case 1: File system root being used as brick directory -TEST ! $cli1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 - -#Case 2: Brick directory contains only one component -TEST ! $cli1 volume create $V0 $H1:/$uuid1 $H2:/$uuid2 - -#Case 3: Sub-directories of the backend FS being used as brick directory -TEST $cli1 volume create $V0 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 - -#add-brick tests -TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3 -TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0 -TEST ! $cli1 volume add-brick $V0 $H1:/$uuid3 -TEST $cli1 volume add-brick $V0 $H1:$B3/$V0/brick3 - -#####replace-brick tests -#FIX-ME : replace-brick does not currently work in the newly introduced -#####cluster test framework - -$CLI1 volume stop $V0 - -UMOUNT_LOOP $B1/$V0 -UMOUNT_LOOP $B2/$V0 -UMOUNT_LOOP $B3/$V0 - -rm -f $B1/brick1 -rm -f $B2/brick2 -rm -f $B3/brick3 - -cleanup; diff --git a/tests/bugs/bug-948729/bug-948729.t b/tests/bugs/bug-948729/bug-948729.t deleted file mode 100644 index 46cdb3d8e8c..00000000000 --- a/tests/bugs/bug-948729/bug-948729.t +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../../include.rc -. $(dirname $0)/../../volume.rc -. $(dirname $0)/../../cluster.rc - -function check_peers { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} - -cleanup; - -uuid1=`uuidgen`; -uuid2=`uuidgen`; -uuid3=`uuidgen`; - -TEST launch_cluster 2; - -TEST $CLI_1 peer probe $H2; - -EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; - -B3=/d/backends/3 - -mkdir -p $B3 - -TEST truncate -s 16M $B1/brick1 -TEST truncate -s 16M $B2/brick2 -TEST truncate -s 16M $B3/brick3 - -TEST LD1=`SETUP_LOOP $B1/brick1` -TEST MKFS_LOOP $LD1 -TEST LD2=`SETUP_LOOP $B2/brick2` -TEST MKFS_LOOP $LD2 -TEST LD3=`SETUP_LOOP $B3/brick3` -TEST MKFS_LOOP $LD3 - -mkdir -p $B1/$V0 $B2/$V0 $B3/$V0 - -TEST MOUNT_LOOP $LD1 $B1/$V0 -TEST MOUNT_LOOP $LD2 $B2/$V0 -TEST MOUNT_LOOP $LD3 $B3/$V0 - -#Tests without options 'mode=script' and 'wignore' -cli1=$(echo $CLI1 | sed 's/ --mode=script//') -cli1=$(echo $cli1 | sed 's/ --wignore//') -#Case 0: Parent directory of the brick is absent -TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 - -#Case 1: File system root being used as brick directory -TEST ! $cli1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 - -#Case 2: Brick directory contains only one component -TEST ! $cli1 volume create $V0 $H1:/$uuid1 $H2:/$uuid2 - -#Case 3: Sub-directories of the backend FS being used as brick directory -TEST $cli1 volume create $V0 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 - -#add-brick tests -TEST ! 
$cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/b3 -TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0 -TEST ! $cli1 volume add-brick $V0 $H1:/$uuid3 -TEST $cli1 volume add-brick $V0 $H1:$B3/$V0/brick3 - -#####replace-brick tests -#FIX-ME: Replace-brick does not work currently in the newly introduced cluster -#####test framework. - -$CLI1 volume stop $V0 - -UMOUNT_LOOP $B1/$V0 -UMOUNT_LOOP $B2/$V0 -UMOUNT_LOOP $B3/$V0 - -rm -f $B1/brick1 -rm -f $B2/brick2 -rm -f $B3/brick3 - - -cleanup; diff --git a/tests/bugs/bug-949242.t b/tests/bugs/bug-949242.t deleted file mode 100644 index 31e5bf6be6f..00000000000 --- a/tests/bugs/bug-949242.t +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash -# -# Bug 949242 - Test basic fallocate functionality. -# -# Run several commands to verify basic fallocate functionality. We verify that -# fallocate creates and allocates blocks to a file. We also verify that the keep -# size option does not modify the file size. -### - -. $(dirname $0)/../include.rc -. $(dirname $0)/../fallocate.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4} -TEST $CLI volume start $V0 - -TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 - -# check for fallocate support before continuing the test -require_fallocate -l 1m -n $M0/file && rm -f $M0/file - -# fallocate a file and verify blocks are allocated -TEST fallocate -l 1m $M0/file -blksz=`stat -c %b $M0/file` -nblks=`stat -c %B $M0/file` -TEST [ $(($blksz * $nblks)) -eq 1048576 ] - -TEST unlink $M0/file - -# truncate a file to a fixed size, fallocate and verify that the size does not -# change -TEST truncate -s 1M $M0/file -TEST fallocate -l 2m -n $M0/file -blksz=`stat -c %b $M0/file` -nblks=`stat -c %B $M0/file` -sz=`stat -c %s $M0/file` -TEST [ $sz -eq 1048576 ] -# Note that gluster currently incorporates a hack to limit the number of blocks -# reported as allocated to the file by the file size. We have allocated beyond the -# file size here. Just check for non-zero allocation to avoid setting a land mine -# for if/when that behavior might change. -TEST [ ! $(($blksz * $nblks)) -eq 0 ] - -TEST unlink $M0/file - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 - -cleanup; diff --git a/tests/bugs/bug-949298.t b/tests/bugs/bug-949298.t deleted file mode 100644 index 1394127ec57..00000000000 --- a/tests/bugs/bug-949298.t +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; -TEST glusterd -TEST pidof glusterd - -TEST $CLI --xml volume info $V0 - -cleanup; diff --git a/tests/bugs/bug-949930.t b/tests/bugs/bug-949930.t deleted file mode 100644 index 4a738befac4..00000000000 --- a/tests/bugs/bug-949930.t +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -V1=patchy2 - -cleanup; - -TEST glusterd; -TEST pidof glusterd; - -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; -TEST $CLI volume start $V0; - -TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2}; -TEST $CLI volume start $V1; - -TEST ! 
$CLI volume set $V0 performance.nfs.read-ahead blah -EXPECT '' volume_option $V0 performance.nfs.read-ahead - -TEST $CLI volume set $V0 performance.nfs.read-ahead on -EXPECT "on" volume_option $V0 performance.nfs.read-ahead - -EXPECT '' volume_option $V1 performance.nfs.read-ahead - -cleanup; - diff --git a/tests/bugs/bug-954057.t b/tests/bugs/bug-954057.t deleted file mode 100755 index 30bc1d77e6c..00000000000 --- a/tests/bugs/bug-954057.t +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#This script checks if use-readdirp option works as accepted in mount options - - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0} -TEST $CLI volume start $V0 - -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 - -TEST mkdir $M0/dir -TEST mkdir $M0/nobody -TEST chown nfsnobody:nfsnobody $M0/nobody -TEST `echo "file" >> $M0/file` -TEST cp $M0/file $M0/new -TEST chmod 700 $M0/new -TEST cat $M0/new - -TEST $CLI volume set $V0 server.root-squash enable -TEST `echo 3 > /proc/sys/vm/drop_caches` -TEST ! mkdir $M0/other -TEST mkdir $M0/nobody/other -TEST cat $M0/file -TEST ! cat $M0/new -TEST `echo "nobody" >> $M0/nobody/file` - -#mount the client without root-squashing -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 --no-root-squash=yes $M1 -TEST mkdir $M1/m1_dir -TEST `echo "file" >> $M1/m1_file` -TEST cp $M0/file $M1/new -TEST chmod 700 $M1/new -TEST cat $M1/new - -TEST $CLI volume set $V0 server.root-squash disable -TEST mkdir $M0/other -TEST cat $M0/new - -cleanup diff --git a/tests/bugs/bug-955588.t b/tests/bugs/bug-955588.t deleted file mode 100755 index 182035c6132..00000000000 --- a/tests/bugs/bug-955588.t +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; -TEST glusterd -TEST pidof glusterd - -function get_brick_host_uuid() -{ - local vol=$1; - local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}' - local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex"); - - echo $host_uuid_list | awk '{print $1}' -} - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} - -uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=` -EXPECT $uuid get_brick_host_uuid $V0 - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-957877.t b/tests/bugs/bug-957877.t deleted file mode 100644 index 52bbd62ec2a..00000000000 --- a/tests/bugs/bug-957877.t +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../afr.rc -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume start $V0; - -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; -kill_brick $V0 $H0 $B0/${V0}0 -TEST touch $M0/f1 -TEST setfattr -n "user.foo" -v "test" $M0/f1 - -BRICK=$B0"/${V0}1" - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -TEST $CLI volume heal $V0 - -# Wait for self-heal to complete -EXPECT_WITHIN $HEAL_TIMEOUT '1' count_sh_entries $BRICK; - -TEST getfattr -n "user.foo" $B0/${V0}0/f1; - -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-958691.t b/tests/bugs/bug-958691.t deleted file mode 100644 index 18a2fb5c87d..00000000000 --- a/tests/bugs/bug-958691.t +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../nfs.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1} -TEST $CLI volume start $V0; - -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 nolock; - -# Tests for the fuse mount -TEST mkdir $M0/dir; -TEST chmod 1777 $M0/dir; -TEST touch $M0/dir/file{1,2}; - -TEST $CLI volume set $V0 server.root-squash enable; - -mv $M0/dir/file1 $M0/dir/file11 2>/dev/null; -TEST [ $? -ne 0 ]; - -TEST $CLI volume set $V0 server.root-squash disable; -TEST rm -rf $M0/dir; - -sleep 1; - -# tests for nfs mount -TEST mkdir $N0/dir; -TEST chmod 1777 $N0/dir; -TEST touch $N0/dir/file{1,2}; - -TEST $CLI volume set $V0 server.root-squash enable; - -mv $N0/dir/file1 $N0/dir/file11 2>/dev/null; -TEST [ $? -ne 0 ]; - -TEST $CLI volume set $V0 server.root-squash disable; -TEST rm -rf $N0/dir; -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-958790.t b/tests/bugs/bug-958790.t deleted file mode 100644 index fc5f63a0c28..00000000000 --- a/tests/bugs/bug-958790.t +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -touch $GLUSTERD_WORKDIR/groups/test -echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test -echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test - -TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; -TEST $CLI volume set $V0 group test -EXPECT "off" volume_option $V0 performance.read-ahead -EXPECT "off" volume_option $V0 performance.open-behind - -cleanup; diff --git a/tests/bugs/bug-961307.t b/tests/bugs/bug-961307.t deleted file mode 100644 index 9775c3ad0c9..00000000000 --- a/tests/bugs/bug-961307.t +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; - -REPLICA=2 - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 $H0:$B0/${V0}-10 $H0:$B0/${V0}-11 -TEST $CLI volume start $V0 - -var1=$(gluster volume remove-brick $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 start 2>&1) -var2="volume remove-brick start: failed: Volume $H0:$B0/${V0}-00 does not exist" - -EXPECT "$var2" echo "$var1" -cleanup; diff --git a/tests/bugs/bug-961615.t b/tests/bugs/bug-961615.t deleted file mode 100644 index d10eeeabb3b..00000000000 --- a/tests/bugs/bug-961615.t +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#This test tests that an extra fd_unref does not happen in rebalance -#migration completion check code path in dht - -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 -TEST $CLI volume set $V0 performance.quick-read off -TEST $CLI volume set $V0 performance.io-cache off -TEST $CLI volume set $V0 performance.write-behind off -TEST $CLI volume set $V0 performance.stat-prefetch off -TEST $CLI volume set $V0 performance.read-ahead off -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -TEST touch $M0/1 -#This rename creates a link file for 10 in the other volume. -TEST mv $M0/1 $M0/10 -#Lets keep writing to the file which will trigger rebalance completion check -dd if=/dev/zero of=$M0/10 bs=1k & -bg_pid=$! -#Now rebalance force will migrate file '10' -TEST $CLI volume rebalance $V0 start force -EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 -#If the bug exists mount would have crashed by now -TEST ls $M0 -kill -9 $bg_pid > /dev/null 2>&1 -wait > /dev/null 2>&1 -cleanup diff --git a/tests/bugs/bug-961669.t b/tests/bugs/bug-961669.t deleted file mode 100644 index 2f7e48c6d31..00000000000 --- a/tests/bugs/bug-961669.t +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -#Test case: Fail remove-brick 'start' variant when reducing the replica count of a volume. - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -#Basic checks -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info - -#Create a 3x3 dist-rep volume -TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5,6,7,8}; -TEST $CLI volume start $V0 - -# Mount FUSE and create file/directory -TEST glusterfs -s $H0 --volfile-id $V0 $M0 -TEST touch $M0/zerobytefile.txt -TEST mkdir $M0/test_dir -TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024 - -function remove_brick_start { - $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} start 2>&1|grep -oE 'success|failed' -} - -function remove_brick { - $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} force 2>&1|grep -oE 'success|failed' -} - -#remove-brick start variant -#Actual message displayed at cli is: -#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option" -EXPECT "failed" remove_brick_start; - -#remove-brick commit-force -#Actual message displayed at cli is: -#"volume remove-brick commit force: success" -EXPECT "success" remove_brick - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0; -TEST ! 
$CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-963541.t b/tests/bugs/bug-963541.t deleted file mode 100755 index 950c7db548b..00000000000 --- a/tests/bugs/bug-963541.t +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}; -TEST $CLI volume start $V0; - -# Start a remove-brick and try to start a rebalance/remove-brick without committing -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start - -TEST ! $CLI volume rebalance $V0 start -TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start - -#Try to start rebalance/remove-brick again after commit -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit - -gluster volume status - -TEST $CLI volume rebalance $V0 start -TEST $CLI volume rebalance $V0 stop - -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop - -TEST $CLI volume stop $V0 - -cleanup; - diff --git a/tests/bugs/bug-963678.t b/tests/bugs/bug-963678.t deleted file mode 100644 index 9431010d60d..00000000000 --- a/tests/bugs/bug-963678.t +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash -# -# Bug 963678 - Test discard functionality -# -# Test that basic discard (hole punch) functionality works via the fallocate -# command line tool. Hole punch deallocates a region of a file, creating a hole -# and a zero-filled data region. We verify that hole punch works, frees blocks -# and that subsequent reads do not read stale data (caches are invalidated). -# -# NOTE: fuse fallocate is known to be broken with regard to cache invalidation -# up to 3.9.0 kernels. Therefore, FOPEN_KEEP_CACHE is not used in this -# test (opens will invalidate the fuse cache). -### - -. $(dirname $0)/../include.rc -. $(dirname $0)/../fallocate.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2} -TEST $CLI volume start $V0 - -TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 - -# check for fallocate and hole punch support -require_fallocate -l 1m $M0/file -require_fallocate -p -l 512k $M0/file && rm -f $M0/file - -# allocate some blocks, punch a hole and verify block allocation -TEST fallocate -l 1m $M0/file -blksz=`stat -c %B $M0/file` -nblks=`stat -c %b $M0/file` -TEST [ $(($blksz * $nblks)) -ge 1048576 ] -TEST fallocate -p -o 512k -l 128k $M0/file - -nblks=`stat -c %b $M0/file` -# allow some room for xattr blocks -TEST [ $(($blksz * $nblks)) -lt $((917504 + 16384)) ] -TEST unlink $M0/file - -# write some data, punch a hole and verify the file content changes -TEST dd if=/dev/urandom of=$M0/file bs=1024k count=1 -TEST cp $M0/file $M0/file.copy.pre -TEST fallocate -p -o 512k -l 128k $M0/file -TEST cp $M0/file $M0/file.copy.post -TEST ! cmp $M0/file.copy.pre $M0/file.copy.post -TEST unlink $M0/file - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 - -cleanup; diff --git a/tests/bugs/bug-964059.t b/tests/bugs/bug-964059.t deleted file mode 100755 index e81e4d708bc..00000000000 --- a/tests/bugs/bug-964059.t +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../cluster.rc - -function check_peers { - $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l -} - -function volume_count { - local cli=$1; - if [ $cli -eq '1' ] ; then - $CLI_1 volume info | grep 'Volume Name' | wc -l; - else - $CLI_2 volume info | grep 'Volume Name' | wc -l; - fi -} - -cleanup; - -TEST launch_cluster 2; -TEST $CLI_1 peer probe $H2; - -EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers - -TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 -TEST $CLI_1 volume start $V0 -TEST $CLI_1 volume remove-brick $V0 $H2:$B2/$V0 start -TEST $CLI_1 volume status -cleanup; diff --git a/tests/bugs/bug-966018.t b/tests/bugs/bug-966018.t deleted file mode 100644 index 47a36c40c61..00000000000 --- a/tests/bugs/bug-966018.t +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../nfs.rc - -#This tests if eager-lock blocks metadata operations on nfs/fuse mounts. -#If it is not woken up, INODELK from the next command waits -#for post-op-delay secs. - -cleanup; -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1 -TEST $CLI volume set $V0 ensure-durability off -TEST $CLI volume set $V0 cluster.eager-lock on -TEST $CLI volume set $V0 cluster.post-op-delay-secs 3 - -TEST $CLI volume start $V0 -TEST $CLI volume profile $V0 start -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 nolock; -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 -echo 1 > $N0/1 && chmod +x $N0/1 -echo 1 > $M0/1 && chmod +x $M0/1 - -#Check that INODELK MAX latency is not in the order of seconds -#Test if the MAX INODELK fop latency is of the order of seconds. -inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}") - -TEST [ -z $inodelk_max_latency ] -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 - -cleanup; diff --git a/tests/bugs/bug-969193.t b/tests/bugs/bug-969193.t deleted file mode 100755 index e78a2980e6e..00000000000 --- a/tests/bugs/bug-969193.t +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -# Test that "system getspec" works without op_version problems. - -. $(dirname $0)/../include.rc -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info -TEST $CLI volume create $V0 $H0:$B0/brick1 -TEST $CLI system getspec $V0 -cleanup; diff --git a/tests/bugs/bug-970070.t b/tests/bugs/bug-970070.t deleted file mode 100755 index 08ab4a3f8c4..00000000000 --- a/tests/bugs/bug-970070.t +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -# TEST the nfs.acl option -. $(dirname $0)/../include.rc - -cleanup -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/$V0 -TEST $CLI volume start $V0 -TEST $CLI volume set $V0 nfs.acl off -TEST $CLI volume set $V0 nfs.acl on -cleanup diff --git a/tests/bugs/bug-973073.t b/tests/bugs/bug-973073.t deleted file mode 100755 index 3ea54132a83..00000000000 --- a/tests/bugs/bug-973073.t +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../dht.rc - -## Steps followed are the ones described in bugzilla - -cleanup; - -function get_layout() -{ - layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1` - - if [ $? 
-ne 0 ] - then - echo 1 - else - echo 0 - fi - -} - -BRICK_COUNT=3 - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 -TEST $CLI volume start $V0 - -## Mount FUSE -TEST glusterfs -s $H0 --volfile-id $V0 $M0; - -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start - -## remove-brick status == rebalance_status -EXPECT_WITHIN $REBALANCE_TIMEOUT "0" remove_brick_completed - -TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop - -TEST $CLI volume rebalance $V0 fix-layout start - -EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed - -TEST mkdir $M0/dir 2>/dev/null; - -EXPECT "0" get_layout $B0/${V0}2/dir -cleanup; diff --git a/tests/bugs/bug-974007.t b/tests/bugs/bug-974007.t deleted file mode 100644 index 84197ec8a0f..00000000000 --- a/tests/bugs/bug-974007.t +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -#Test case: Create a distributed replicate volume, and remove multiple -#replica pairs in a single remove-brick command. - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -#Basic checks -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info - -#Create a 3X2 distributed-replicate volume -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6}; -TEST $CLI volume start $V0 - -# Mount FUSE and create files -TEST glusterfs -s $H0 --volfile-id $V0 $M0 -TEST touch $M0/file{1..10} - -# Remove bricks from two sub-volumes to make it a 1x2 vol. -# Bricks in question are given in a random order but from the same subvols. -function remove_brick_start_status { - $CLI volume remove-brick $V0 \ - $H0:$B0/${V0}6 $H0:$B0/${V0}1 \ - $H0:$B0/${V0}2 $H0:$B0/${V0}5 start 2>&1 |grep -oE "success|failed" -} -EXPECT "success" remove_brick_start_status; - -# Wait for rebalance to complete -EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/${V0}6 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}5" - -# Check commit status -function remove_brick_commit_status { - $CLI volume remove-brick $V0 \ - $H0:$B0/${V0}6 $H0:$B0/${V0}1 \ - $H0:$B0/${V0}2 $H0:$B0/${V0}5 commit 2>&1 |grep -oE "success|failed" -} -EXPECT "success" remove_brick_commit_status; - -# Check the volume type -EXPECT "Replicate" echo `$CLI volume info |grep Type |awk '{print $2}'` - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-974972.t b/tests/bugs/bug-974972.t deleted file mode 100755 index 28b7539a981..00000000000 --- a/tests/bugs/bug-974972.t +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../nfs.rc - -#This script checks that nfs mount does not fail lookup on files with split-brain -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume set $V0 self-heal-daemon off -TEST $CLI volume start $V0 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; -TEST mount_nfs $H0:/$V0 $N0 -TEST touch $N0/1 -TEST kill_brick ${V0} ${H0} ${B0}/${V0}1 -echo abc > $N0/1 -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 1 - -TEST kill_brick ${V0} ${H0} ${B0}/${V0}0 -echo def > $N0/1 -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 1 - -#Lookup should not fail -TEST ls $N0/1 -TEST ! cat $N0/1 - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 -cleanup diff --git a/tests/bugs/bug-976800.t b/tests/bugs/bug-976800.t deleted file mode 100644 index 2aee8cc1128..00000000000 --- a/tests/bugs/bug-976800.t +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -# This test checks if there are any open fds on the brick -# even after the file is closed on the mount. This particular -# test tests dd with "fsync" to check afr's fsync codepath -cleanup; - -function is_fd_open { - local v=$1 - local h=$2 - local b=$3 - local bpid=$(get_brick_pid $v $h $b) - ls -l /proc/$bpid/fd | grep -w "\-> $b/1" -} - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume set $V0 ensure-durability off -TEST $CLI volume set $V0 eager-lock off -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -TEST dd of=$M0/1 if=/dev/zero bs=1k count=1 conv=fsync -TEST ! is_fd_open $V0 $H0 $B0/${V0}0 -cleanup; diff --git a/tests/bugs/bug-977246.t b/tests/bugs/bug-977246.t deleted file mode 100644 index e07ee191939..00000000000 --- a/tests/bugs/bug-977246.t +++ /dev/null @@ -1,21 +0,0 @@ -#! /bin/bash - -# This test checks if address validation correctly catches hostnames -# with consecutive dots, such as 'example..org', as invalid - -. $(dirname $0)/../include.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/${V0}1 -TEST $CLI volume info $V0 -TEST $CLI volume start $V0 - -TEST ! $CLI volume set $V0 auth.allow example..org - -TEST $CLI volume stop $V0 - -cleanup; diff --git a/tests/bugs/bug-977797.t b/tests/bugs/bug-977797.t deleted file mode 100755 index 339588adb52..00000000000 --- a/tests/bugs/bug-977797.t +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -cleanup; - -## Start and create a volume -TEST glusterd; -TEST pidof glusterd; -TEST $CLI volume info; - -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; - -## Verify volume is created -EXPECT "$V0" volinfo_field $V0 'Volume Name'; -EXPECT 'Created' volinfo_field $V0 'Status'; - -## Start volume and verify -TEST $CLI volume start $V0; -EXPECT 'Started' volinfo_field $V0 'Status'; - -TEST $CLI volume set $V0 self-heal-daemon off -TEST $CLI volume set $V0 open-behind off -TEST $CLI volume set $V0 quick-read off -TEST $CLI volume set $V0 read-ahead off -TEST $CLI volume set $V0 write-behind off -TEST $CLI volume set $V0 io-cache off -TEST $CLI volume set $V0 background-self-heal-count 0 - -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 - - -TEST mkdir -p $M0/a -TEST `echo "GLUSTERFS" > $M0/a/file` - -TEST kill_brick $V0 $H0 $B0/$V0"1" - -TEST chown root $M0/a -TEST chown root $M0/a/file -TEST `echo "GLUSTER-FILE-SYSTEM" > $M0/a/file` -TEST mkdir $M0/a/b - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0; - - - -TEST kill_brick $V0 $H0 $B0/$V0"2" - -TEST chmod 757 $M0/a -TEST chmod 757 $M0/a/file - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1; - -TEST dd if=$M0/a/file of=/dev/null bs=1024k - -b1c0dir=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a \ - trusted.afr.$V0-client-0 "entry") -b1c1dir=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a \ - trusted.afr.$V0-client-1 "entry") -b2c0dir=$(afr_get_specific_changelog_xattr \ - $B0/$V0"2"/a trusted.afr.$V0-client-0 "entry") -b2c1dir=$(afr_get_specific_changelog_xattr \ - $B0/$V0"2"/a trusted.afr.$V0-client-1 "entry") - - -b1c0f=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a/file \ - trusted.afr.$V0-client-0 "data") -b1c1f=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a/file \ - trusted.afr.$V0-client-1 "data") -b2c0f=$(afr_get_specific_changelog_xattr $B0/$V0"2"/a/file \ - trusted.afr.$V0-client-0 "data") -b2c1f=$(afr_get_specific_changelog_xattr $B0/$V0"2"/a/file \ - trusted.afr.$V0-client-1 "data") - -EXPECT "00000000|^$" echo $b1c0f -EXPECT "00000000|^$" echo $b1c1f -EXPECT "00000000|^$" echo $b2c0f -EXPECT "00000000|^$" echo $b2c1f - -EXPECT "00000000|^$" echo $b1c0dir -EXPECT "00000000|^$" echo $b1c1dir -EXPECT "00000000|^$" echo $b2c0dir -EXPECT "00000000|^$" echo $b2c1dir - -## Finish up -TEST $CLI volume stop $V0; -EXPECT 'Stopped' volinfo_field $V0 'Status'; - -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup; diff --git a/tests/bugs/bug-978794.t b/tests/bugs/bug-978794.t deleted file mode 100644 index 8cda83efe0a..00000000000 --- a/tests/bugs/bug-978794.t +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. $(dirname $0)/../fileio.rc - - -# This test opens 100 fds and triggers graph switches to check if fsync -# as part of graph-switch causes crash or not. 
- -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 -TEST touch $M0/{1..100} -for i in {1..100}; do fd[$i]=`fd_available`; fd_open ${fd[$i]} 'w' $M0/$i; done -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{2,3} -TEST $CLI volume rebalance $V0 start force -EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 -TEST cat $M0/{1..100} -for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done -TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{4,5} -TEST $CLI volume rebalance $V0 start force -EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 -for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done -TEST cat $M0/{1..100} -cleanup diff --git a/tests/bugs/bug-979365.t b/tests/bugs/bug-979365.t deleted file mode 100755 index fa9e1de9381..00000000000 --- a/tests/bugs/bug-979365.t +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#This script checks that ensure-durability option enables/disables afr -#sending fsyncs -cleanup; - -function num_fsyncs { - $CLI volume profile $V0 info | grep -w FSYNC | wc -l -} - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume set $V0 ensure-durability on -TEST $CLI volume set $V0 eager-lock off -TEST $CLI volume start $V0 -TEST $CLI volume profile $V0 start -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 -TEST kill_brick $V0 $H0 $B0/${V0}0 -TEST dd of=$M0/a if=/dev/zero bs=1024k count=10 -#fsyncs take a while to complete. -sleep 5 - -# There can be zero or more fsyncs, depending on the order -# in which the writes reached the server, in turn deciding -# whether they were treated as "appending" writes or not. - -TEST [[ $(num_fsyncs) -ge 0 ]] -#Stop the volume to erase the profile info of old operations -TEST $CLI volume profile $V0 stop -TEST $CLI volume stop $V0 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -#Disable ensure-durability now to disable fsyncs in afr. -TEST $CLI volume set $V0 ensure-durability off -TEST $CLI volume start $V0 -TEST kill_brick $V0 $H0 $B0/${V0}0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 -TEST $CLI volume profile $V0 start -TEST dd of=$M0/a if=/dev/zero bs=1024k count=10 -#fsyncs take a while to complete. -sleep 5 -TEST [[ $(num_fsyncs) -eq 0 ]] - -cleanup diff --git a/tests/bugs/bug-982174.t b/tests/bugs/bug-982174.t deleted file mode 100644 index 460af751170..00000000000 --- a/tests/bugs/bug-982174.t +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -# Test to check -. $(dirname $0)/../include.rc -. 
$(dirname $0)/../volume.rc - -#Check that incorrect log-level keywords do not crash the CLI -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2 -TEST $CLI volume start $V0 - -function set_log_level_status { - local level=$1 - $CLI volume set $V0 diagnostics.client-log-level $level 2>&1 |grep -oE 'success|failed' -} - - -LOG_LEVEL="trace" -EXPECT "failed" set_log_level_status $LOG_LEVEL - - -LOG_LEVEL="error-gen" -EXPECT "failed" set_log_level_status $LOG_LEVEL - - -LOG_LEVEL="TRACE" -EXPECT "success" set_log_level_status $LOG_LEVEL - -EXPECT "$LOG_LEVEL" echo `$CLI volume info | grep diagnostics | awk '{print $2}'` - -TEST $CLI volume stop $V0; -TEST $CLI volume delete $V0; - -cleanup; diff --git a/tests/bugs/bug-983317.t b/tests/bugs/bug-983317.t deleted file mode 100644 index 7355cbaafd1..00000000000 --- a/tests/bugs/bug-983317.t +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/$V0 - -# Set a volume option -TEST $CLI volume set $V0 open-behind on -TEST $CLI volume start $V0 - -# Execute volume get without having an explicit option, this should fail -TEST ! $CLI volume get $V0 - -# Execute volume get with an explicit option -TEST $CLI volume get $V0 open-behind - -# Execute volume get with 'all' -TEST $CLI volume get $V0 all - -cleanup; diff --git a/tests/bugs/bug-983477.t b/tests/bugs/bug-983477.t deleted file mode 100755 index 47d2f30833a..00000000000 --- a/tests/bugs/bug-983477.t +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#This script checks if use-readdirp option works as accepted in mount options - -function get_use_readdirp_value { - local vol=$1 - local statedump=$(generate_mount_statedump $vol) - sleep 1 - local val=$(grep "use_readdirp=" $statedump | cut -f2 -d'=' | tail -1) - rm -f $statedump - echo $val -} -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 $H0:$B0/${V0} -TEST $CLI volume start $V0 -#If readdirp is enabled statedump should reflect it -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=yes -TEST cd $M0 -EXPECT "1" get_use_readdirp_value $V0 -TEST cd - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -#If readdirp is disabled statedump should reflect it -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=no -TEST cd $M0 -EXPECT "0" get_use_readdirp_value $V0 -TEST cd - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -#Since args are optional on this argument just specifying "--use-readdirp" should also turn it `on` not `off` -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp -TEST cd $M0 -EXPECT "1" get_use_readdirp_value $V0 -TEST cd - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -#By default it is enabled. -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -TEST cd $M0 -EXPECT "1" get_use_readdirp_value $V0 -TEST cd - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -#Invalid values for use-readdirp should not be accepted -TEST ! 
glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=please-fail - -cleanup diff --git a/tests/bugs/bug-985074.t b/tests/bugs/bug-985074.t deleted file mode 100644 index d8f852a7587..00000000000 --- a/tests/bugs/bug-985074.t +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash -# -# Bug 985074 - Verify stale inode/dentry mappings are cleaned out. -# -# This test verifies that an inode/dentry mapping for a file removed via a -# separate mount point is cleaned up appropriately. We create a file and hard -# link from client 1. Next we remove the link via client 2. Finally, from client -# 1 we attempt to rename the original filename to the name of the just removed -# hard link. -# -# If the inode is not unlinked properly, the removed directory entry can resolve -# to an inode (on the client that never saw the rm) that ends up passed down -# through the lookup call. If md-cache holds valid metadata on the inode (due to -# a large timeout value or recent lookup on the valid name), it is tricked into -# returning a successful lookup that should have returned ENOENT. This manifests -# as an error from the mv command in the following test sequence because file -# and file.link resolve to the same file: -# -# # mv /mnt/glusterfs/0/file /mnt/glusterfs/0/file.link -# mv: `/mnt/glusterfs/0/file' and `/mnt/glusterfs/0/file.link' are the same file -# -### - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -cleanup; - -TEST glusterd - -TEST $CLI volume create $V0 $H0:$B0/$V0 -TEST $CLI volume start $V0 -TEST $CLI volume set $V0 md-cache-timeout 3 - -TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0 -TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1 --entry-timeout=0 --attribute-timeout=0 - -TEST touch $M0/file -TEST ln $M0/file $M0/file.link -TEST ls -ali $M0 $M1 -TEST rm -f $M1/file.link -TEST ls -ali $M0 $M1 -# expire the md-cache timeout -sleep 3 -TEST mv $M0/file $M0/file.link -TEST stat $M0/file.link -TEST ! stat $M0/file - -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 - -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0 - -cleanup; diff --git a/tests/bugs/bug-986429.t b/tests/bugs/bug-986429.t deleted file mode 100644 index 6e43f72b775..00000000000 --- a/tests/bugs/bug-986429.t +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -## This tests failover achieved by providing multiple -## servers from the trusted pool for fetching volume -## specification - -cleanup; - -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/$V0 -TEST $CLI volume start $V0 -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s non-existent -s $H0 --volfile-id=/$V0 $M0 - -cleanup; diff --git a/tests/bugs/bug-986905.t b/tests/bugs/bug-986905.t deleted file mode 100755 index ed11bbbd03d..00000000000 --- a/tests/bugs/bug-986905.t +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc - -#This script checks if hardlinks that are created while a brick is down are -#healed properly. 
- -cleanup; -function get_inum { - ls -i $1 | awk '{print $1}' -} - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -TEST kill_brick $V0 $H0 $B0/${V0}0 -TEST touch $M0/a -TEST ln $M0/a $M0/link_a -TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 -TEST ls -l $M0 -inum=$(get_inum $B0/${V0}0/a) -EXPECT "$inum" get_inum $B0/${V0}0/link_a -cleanup diff --git a/tests/bugs/bug-990028.t b/tests/bugs/bug-990028.t deleted file mode 100755 index fbf4175bea7..00000000000 --- a/tests/bugs/bug-990028.t +++ /dev/null @@ -1,155 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../fileio.rc - -cleanup; - -TESTS_EXPECTED_IN_LOOP=153 - -function __init() -{ - TEST glusterd - TEST pidof glusterd - TEST $CLI volume info; - - TEST $CLI volume create $V0 $H0:$B0/brick - - EXPECT 'Created' volinfo_field $V0 'Status'; - - TEST $CLI volume start $V0 - - TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 - - TEST $CLI volume quota $V0 enable -} - -#CASE-1 -#checking pgfid under same directory -function links_in_same_directory() -{ - # create a file file1 - TEST touch $M0/file1 - - # create 50 hardlinks for file1 - for i in `seq 2 50`; do - TEST_IN_LOOP ln $M0/file1 $M0/file$i - done - - # store the pgfid of file1 in PGFID_FILE1 [should be 50 now (0x000000032)] - PGFID_FILE1=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/file1 2>&1 | grep "trusted.pgfid" | gawk -F '=' '{print $2}'` - - # compare the pgfid(link value ) of each hard links are equal or not - for i in `seq 2 50`; do - TEMP=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/file$i 2>&1 | grep "trusted.pgfid" | gawk -F '=' '{print $2}'` - TEST_IN_LOOP [ $PGFID_FILE1 = $TEMP ] - done - - # check if no of links value is 50 or not - TEST [ $PGFID_FILE1 = "0x00000032" ] - - # unlink file 2 to 50 - for i in `seq 2 50`; do - TEST_IN_LOOP unlink $M0/file$i; - done - - # now check if pgfid value is 1 or not - PGFID_FILE1=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/file1 2>&1 | grep "trusted.pgfid" | gawk -F '=' '{print $2}'`; - - TEST [ $PGFID_FILE1 = "0x00000001" ] - - TEST rm -f $M0/* -} - -##checking pgfid under diff directories -function links_across_directories() -{ - TEST mkdir $M0/dir1 $M0/dir2; - - # create a file in dir1 - TEST touch $M0/dir1/file1; - - # create hard link for file1 in dir2 - TEST ln $M0/dir1/file1 $M0/dir2/file2; - - #first check is to find whether there are two pgfids or not - LINES=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir1/file1 2>&1 | grep "trusted.pgfid" | wc -l` - TEST [ $LINES = 2 ] - - for i in $(seq 1 2); do - HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir$i/file$i 2>&1 | grep "trusted.pgfid" | cut -d$'\n' -f$i | cut -d'=' -f2` - TEST_IN_LOOP [ $HL = "0x00000001" ] - done - - #now unlink file2 and check the pgfid of file1 - #1. no. of pgfid should be one - #2. no. 
of hard link should be one - TEST unlink $M0/dir2/file2 - - LINES=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir1/file1 2>&1 | grep "trusted.pgfid" | wc -l` - TEST [ $LINES == 1 ] - - #next to check is to whether they contain hard link value of one or not - HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir1/file1 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2` - TEST [ $HL = "0x00000001" ] - - #rename file under same directory - - TEST touch $M0/r_file1 - PGFID_rfile1=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/r_file1 2>&1 | grep "trusted.pgfid"` - - #cross check whether hard link count is one - HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/r_file1 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2` - - TEST [ $HL = "0x00000001" ] - - #now rename the file to r_file1 - TEST mv $M0/r_file1 $M0/r_file2 - - #now check the pgfid hard link count is still one or not - HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/r_file2 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2` - - TEST [ $HL = "0x00000001" ] - - #now move the file to a different directory where it has no hard link and check - TEST mkdir $M0/dir3; - TEST mv $M0/r_file2 $M0/dir3; - - #now check the pgfid has changed or not and hard limit is one or not - PGFID_newDir=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir3/r_file2 2>&1 | grep "trusted.pgfid"` - - #now the older pgfid and new pgfid shouldn't match - TEST [ $PGFID_rfile1 != $PGFID_newDir ] - - HL=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir3/r_file2 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2` - TEST [ $HL = "0x00000001" ] - - TEST touch $M0/dir1/rl_file_1 - ln $M0/dir1/rl_file_1 $M0/dir2/rl_file_2 - mv $M0/dir1/rl_file_1 $M0/dir2 - - #now the there should be just one pgfid for both files - for i in $(seq 1 2); do - NL=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_$i 2>&1 | grep "trusted.pgfid"|wc -l ` - TEST_IN_LOOP [ $HL = "0x00000001" ] - done - - #now pgfid of both files should match - P_rl_file_1=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_1 2>&1 | grep "trusted.pgfid"` - P_rl_file_2=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_2 2>&1 | grep "trusted.pgfid"` - TEST [ $P_rl_file_1 = $P_rl_file_2 ] - - #now the no of hard link should be two for both rl_file_1 and rl_file_2 - for i in $(seq 1 2); do - HL=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_$i 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2` - TEST_IN_LOOP [ $HL = "0x00000002" ] - done - - TEST rm -rf $M0/* -} - -__init; -links_in_same_directory; -links_across_directories; - -cleanup diff --git a/tests/bugs/bug-991622.t b/tests/bugs/bug-991622.t deleted file mode 100644 index 5c324346510..00000000000 --- a/tests/bugs/bug-991622.t +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../volume.rc -. 
$(dirname $0)/../fileio.rc - -#This tests that no fd leaks are observed in unlink/rename in open-behind -function leaked_fds { - ls -l /proc/$(get_brick_pid $V0 $H0 $B0/$V0)/fd | grep deleted -} - -cleanup; -TEST glusterd -TEST pidof glusterd - -TEST $CLI volume create $V0 $H0:$B0/$V0 -TEST $CLI volume set $V0 open-behind on -TEST $CLI volume start $V0 -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable - -TEST fd1=`fd_available` -TEST fd_open $fd1 'w' "$M0/testfile1" -TEST fd_write $fd1 "content" - -TEST fd2=`fd_available` -TEST fd_open $fd2 'w' "$M0/testfile2" -TEST fd_write $fd2 "content" - -TEST touch $M0/a -TEST rm $M0/testfile1 -TEST mv $M0/a $M0/testfile2 -TEST fd_close $fd1 -TEST fd_close $fd2 -TEST ! leaked_fds -cleanup; diff --git a/tests/bugs/cli/bug-1004218.t b/tests/bugs/cli/bug-1004218.t new file mode 100644 index 00000000000..ab8307d6405 --- /dev/null +++ b/tests/bugs/cli/bug-1004218.t @@ -0,0 +1,26 @@ +#!/bin/bash + +# Test if only a single xml document is generated by 'status all' +# when a volume is not started + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd + +TEST $CLI volume create ${V0}1 $H0:$B0/${V0}1{1,2} +TEST $CLI volume create ${V0}2 $H0:$B0/${V0}2{1,2} + +TEST $CLI volume start ${V0}1 + +function test_status_all () +{ + $CLI volume status all --xml | xmllint -format - +} + +TEST test_status_all + +TEST $CLI volume stop ${V0}1 + +cleanup diff --git a/tests/bugs/cli/bug-1022905.t b/tests/bugs/cli/bug-1022905.t new file mode 100644 index 00000000000..ce163f51943 --- /dev/null +++ b/tests/bugs/cli/bug-1022905.t @@ -0,0 +1,39 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +## Create a volume +TEST glusterd; +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/${V0}{1}; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Volume start +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Enable a protected and a resettable/unprotected option +TEST $CLI volume quota $V0 enable +TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG + +## Reset cmd resets only unprotected option(s), succeeds. +TEST $CLI volume reset $V0; + +## Reset should fail +TEST ! $CLI volume reset $V0; + +## Set an unprotected option +TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG + +## Now 1 protected and 1 unprotected options are set +## Reset force should succeed +TEST $CLI volume reset $V0 force; + +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/cli/bug-1030580.t b/tests/bugs/cli/bug-1030580.t new file mode 100644 index 00000000000..a907950e73f --- /dev/null +++ b/tests/bugs/cli/bug-1030580.t @@ -0,0 +1,48 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +function write_to_file { + dd of=$M0/1 if=/dev/zero bs=1024k count=128 oflag=append 2>&1 >/dev/null +} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 +TEST $CLI volume start $V0 +TEST $CLI volume profile $V0 start +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + +# Verify 'volume profile info' prints both cumulative and incremental stats +write_to_file & +wait +output=$($CLI volume profile $V0 info) +EXPECT 2 cumulative_stat_count "$output" +EXPECT 2 incremental_stat_count "$output" ' 0 ' + +# Verify 'volume profile info incremental' prints incremental stats only +write_to_file & +wait +output=$($CLI volume profile $V0 info incremental) +EXPECT 0 cumulative_stat_count "$output" +EXPECT 2 incremental_stat_count "$output" ' 1 ' + +# Verify 'volume profile info cumulative' prints cumulative stats only +write_to_file & +wait +output=$($CLI volume profile $V0 info cumulative) +EXPECT 2 cumulative_stat_count "$output" +EXPECT 0 incremental_stat_count "$output" '.*' + +# Verify the 'volume profile info cumulative' command above didn't alter +# the interval id +write_to_file & +wait +output=$($CLI volume profile $V0 info incremental) +EXPECT 0 cumulative_stat_count "$output" +EXPECT 2 incremental_stat_count "$output" ' 2 ' + +cleanup; diff --git a/tests/bugs/cli/bug-1047378.t b/tests/bugs/cli/bug-1047378.t new file mode 100644 index 00000000000..33ee6be3d8b --- /dev/null +++ b/tests/bugs/cli/bug-1047378.t @@ -0,0 +1,12 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST "echo volume list | $CLI --xml | xmllint --format -" + +cleanup diff --git a/tests/bugs/cli/bug-1047416.t b/tests/bugs/cli/bug-1047416.t new file mode 100644 index 00000000000..6e1b0a48467 --- /dev/null +++ b/tests/bugs/cli/bug-1047416.t @@ -0,0 +1,66 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +function write_to_file { + dd of=$M0/1 if=/dev/zero bs=1024k count=128 oflag=append 2>&1 >/dev/null +} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 +TEST $CLI volume start $V0 +TEST $CLI volume profile $V0 start +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + +# Verify 'volume profile info' prints both cumulative and incremental stats +write_to_file & +wait +output=$($CLI volume profile $V0 info) +EXPECT 2 cumulative_stat_count "$output" +EXPECT 2 incremental_stat_count "$output" ' 0 ' + +# Verify 'volume profile info peek' prints both cumulative and incremental stats +# without clearing incremental stats +write_to_file & +wait +output=$($CLI volume profile $V0 info peek) +EXPECT 2 cumulative_stat_count "$output" +EXPECT 2 incremental_stat_count "$output" ' 1 ' + +write_to_file & +wait +output=$($CLI volume profile $V0 info peek) +EXPECT 2 cumulative_stat_count "$output" +EXPECT 2 incremental_stat_count "$output" ' 1 ' + +# Verify 'volume profile info incremental peek' prints incremental stats only +# without clearing incremental stats +write_to_file & +wait +output=$($CLI volume profile $V0 info incremental peek) +EXPECT 0 cumulative_stat_count "$output" +EXPECT 2 incremental_stat_count "$output" ' 1 ' + +write_to_file & +wait +output=$($CLI volume profile $V0 info incremental peek) +EXPECT 0 cumulative_stat_count "$output" +EXPECT 2 incremental_stat_count "$output" ' 1 ' + +# Verify 'volume profile info clear' clears both incremental and cumulative stats +write_to_file & +wait +output=$($CLI volume profile $V0 info clear) +EXPECT 2 cleared_stat_count "$output" + +output=$($CLI volume profile $V0 info) +EXPECT 2 cumulative_stat_count "$output" +EXPECT 2 incremental_stat_count "$output" ' 0 ' +EXPECT 4 data_read_count "$output" ' 0 ' +EXPECT 4 data_written_count "$output" ' 0 ' + +cleanup; diff --git a/tests/bugs/cli/bug-1077682.t b/tests/bugs/cli/bug-1077682.t new file mode 100644 index 00000000000..6b6d71eeb46 --- /dev/null +++ b/tests/bugs/cli/bug-1077682.t @@ -0,0 +1,34 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +function get-task-status() +{ + $CLI $COMMAND | grep -o $PATTERN + if [ ${PIPESTATUS[0]} -ne 0 ]; + then + return 1 + fi + return 0 +} + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3,4} +TEST $CLI volume start $V0 +TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 start + +EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" \ +"$H0:$B0/${V0}3" + +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 commit +TEST killall glusterd +TEST glusterd + +cleanup diff --git a/tests/bugs/cli/bug-1087487.t b/tests/bugs/cli/bug-1087487.t new file mode 100755 index 00000000000..0659ffab684 --- /dev/null +++ b/tests/bugs/cli/bug-1087487.t @@ -0,0 +1,23 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +function rebalance_start { + $CLI volume rebalance $1 start | head -1; +} + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 + +TEST $CLI volume start $V0 + +EXPECT "volume rebalance: $V0: success: Rebalance on $V0 has \ +been started successfully. 
Use rebalance status command to \ +check status of the rebalance process." rebalance_start $V0 + +cleanup; diff --git a/tests/bugs/cli/bug-1113476.t b/tests/bugs/cli/bug-1113476.t new file mode 100644 index 00000000000..119846d4cff --- /dev/null +++ b/tests/bugs/cli/bug-1113476.t @@ -0,0 +1,44 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../snapshot.rc + +function volinfo_validate () +{ + local var=$1 + $CLI volume info $V0 | grep "^$var" | sed 's/.*: //' +} + +cleanup; + +TEST verify_lvm_version +TEST glusterd +TEST pidof glusterd +TEST setup_lvm 1 + +TEST $CLI volume create $V0 $H0:$L1 +TEST $CLI volume start $V0 + +EXPECT '' volinfo_validate 'snap-max-hard-limit' +EXPECT '' volinfo_validate 'snap-max-soft-limit' +EXPECT '' volinfo_validate 'auto-delete' + +TEST $CLI snapshot config snap-max-hard-limit 100 +EXPECT '100' volinfo_validate 'snap-max-hard-limit' +EXPECT '' volinfo_validate 'snap-max-soft-limit' +EXPECT '' volinfo_validate 'auto-delete' + +TEST $CLI snapshot config snap-max-soft-limit 50 +EXPECT '100' volinfo_validate 'snap-max-hard-limit' +EXPECT '50' volinfo_validate 'snap-max-soft-limit' +EXPECT '' volinfo_validate 'auto-delete' + +TEST $CLI snapshot config auto-delete enable +EXPECT '100' volinfo_validate 'snap-max-hard-limit' +EXPECT '50' volinfo_validate 'snap-max-soft-limit' +EXPECT 'enable' volinfo_validate 'auto-delete' + +cleanup; + + diff --git a/tests/bugs/cli/bug-764638.t b/tests/bugs/cli/bug-764638.t new file mode 100644 index 00000000000..ffc613409d6 --- /dev/null +++ b/tests/bugs/cli/bug-764638.t @@ -0,0 +1,13 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI pool list; +TEST $CLI pool list --xml; + +cleanup; diff --git a/tests/bugs/cli/bug-770655.t b/tests/bugs/cli/bug-770655.t new file mode 100755 index 00000000000..4e0b20d62da --- /dev/null +++ b/tests/bugs/cli/bug-770655.t @@ -0,0 +1,168 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a distribute-replicate volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT 'Distributed-Replicate' volinfo_field $V0 'Type'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting stripe-block-size as 10MB +TEST ! $CLI volume set $V0 stripe-block-size 10MB +EXPECT '' volinfo_field $V0 'cluster.stripe-block-size'; + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; + +## Start and create a replicate volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 replica 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT 'Replicate' volinfo_field $V0 'Type'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting stripe-block-size as 10MB +TEST ! $CLI volume set $V0 stripe-block-size 10MB +EXPECT '' volinfo_field $V0 'cluster.stripe-block-size'; + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! 
$CLI volume info $V0; + +cleanup; + +## Start and create a distribute volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT 'Distribute' volinfo_field $V0 'Type'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting stripe-block-size as 10MB +TEST ! $CLI volume set $V0 stripe-block-size 10MB +EXPECT '' volinfo_field $V0 'cluster.stripe-block-size'; + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; + +## Start and create a stripe volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 stripe 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT 'Stripe' volinfo_field $V0 'Type'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting stripe-block-size as 10MB +TEST $CLI volume set $V0 stripe-block-size 10MB +EXPECT '10MB' volinfo_field $V0 'cluster.stripe-block-size'; + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; + +## Start and create a distributed stripe volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 stripe 4 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT 'Distributed-Stripe' volinfo_field $V0 'Type'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting stripe-block-size as 10MB +TEST $CLI volume set $V0 stripe-block-size 10MB +EXPECT '10MB' volinfo_field $V0 'cluster.stripe-block-size'; + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; + +## Start and create a distributed stripe replicate volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 stripe 2 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT 'Distributed-Striped-Replicate' volinfo_field $V0 'Type'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting stripe-block-size as 10MB +TEST $CLI volume set $V0 stripe-block-size 10MB +EXPECT '10MB' volinfo_field $V0 'cluster.stripe-block-size'; + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/cli/bug-822830.t b/tests/bugs/cli/bug-822830.t new file mode 100755 index 00000000000..b66aa4f8981 --- /dev/null +++ b/tests/bugs/cli/bug-822830.t @@ -0,0 +1,64 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting nfs.rpc-auth-reject as 192.{}.1.2 +TEST ! $CLI volume set $V0 nfs.rpc-auth-reject 192.{}.1.2 +EXPECT '' volinfo_field $V0 'nfs.rpc-auth-reject'; + +# Setting nfs.rpc-auth-allow as a.a. +TEST ! $CLI volume set $V0 nfs.rpc-auth-allow a.a. +EXPECT '' volinfo_field $V0 'nfs.rpc-auth-allow'; + +## Setting nfs.rpc-auth-reject as 192.*..* +TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.*..* +EXPECT '192.*..*' volinfo_field $V0 'nfs.rpc-auth-reject'; + +# Setting nfs.rpc-auth-allow as a.a +TEST $CLI volume set $V0 nfs.rpc-auth-allow a.a +EXPECT 'a.a' volinfo_field $V0 'nfs.rpc-auth-allow'; + +# Setting nfs.rpc-auth-allow as *.redhat.com +TEST $CLI volume set $V0 nfs.rpc-auth-allow *.redhat.com +EXPECT '\*.redhat.com' volinfo_field $V0 'nfs.rpc-auth-allow'; + +# Setting nfs.rpc-auth-allow as 192.168.10.[1-5] +TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.10.[1-5] +EXPECT '192.168.10.\[1-5]' volinfo_field $V0 'nfs.rpc-auth-allow'; + +# Setting nfs.rpc-auth-allow as 192.168.70.? +TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.70.? +EXPECT '192.168.70.?' volinfo_field $V0 'nfs.rpc-auth-allow'; + +# Setting nfs.rpc-auth-reject as 192.168.10.5/16 +TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.10.5/16 +EXPECT '192.168.10.5/16' volinfo_field $V0 'nfs.rpc-auth-reject'; + +## Setting nfs.rpc-auth-reject as 192.*.* +TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.*.* +EXPECT '192.*.*' volinfo_field $V0 'nfs.rpc-auth-reject'; + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/cli/bug-867252.t b/tests/bugs/cli/bug-867252.t new file mode 100644 index 00000000000..ccc33d82a0f --- /dev/null +++ b/tests/bugs/cli/bug-867252.t @@ -0,0 +1,41 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/${V0}1; + + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + + +function brick_count() +{ + local vol=$1; + + $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l; +} + + +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT '1' brick_count $V0 + +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2; +EXPECT '2' brick_count $V0 + +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force; +EXPECT '1' brick_count $V0 + +cleanup; diff --git a/tests/bugs/cli/bug-921215.t b/tests/bugs/cli/bug-921215.t new file mode 100755 index 00000000000..02532562cff --- /dev/null +++ b/tests/bugs/cli/bug-921215.t @@ -0,0 +1,13 @@ +#!/bin/bash + +# This is test case for bug no 921215 "Can not create volume with a . in the name" + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST ! 
$CLI volume create $V0.temp replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+
+cleanup;
diff --git a/tests/bugs/cli/bug-949298.t b/tests/bugs/cli/bug-949298.t
new file mode 100644
index 00000000000..e0692f0c157
--- /dev/null
+++ b/tests/bugs/cli/bug-949298.t
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI --xml volume info $V0
+
+cleanup;
diff --git a/tests/bugs/cli/bug-961307.t b/tests/bugs/cli/bug-961307.t
new file mode 100644
index 00000000000..68fc7bb6a15
--- /dev/null
+++ b/tests/bugs/cli/bug-961307.t
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+REPLICA=2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 $H0:$B0/${V0}-10 $H0:$B0/${V0}-11
+TEST $CLI volume start $V0
+
+var1=$(gluster volume remove-brick $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 start 2>&1)
+var2="volume remove-brick start: failed: Volume $H0:$B0/${V0}-00 does not exist"
+
+EXPECT "$var2" echo "$var1"
+cleanup;
diff --git a/tests/bugs/cli/bug-969193.t b/tests/bugs/cli/bug-969193.t
new file mode 100755
index 00000000000..dd6d7cdf100
--- /dev/null
+++ b/tests/bugs/cli/bug-969193.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Test that "system getspec" works without op_version problems.
+
+. $(dirname $0)/../../include.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+TEST $CLI volume create $V0 $H0:$B0/brick1
+TEST $CLI system getspec $V0
+cleanup;
diff --git a/tests/bugs/cli/bug-977246.t b/tests/bugs/cli/bug-977246.t
new file mode 100644
index 00000000000..bb8d6328e8b
--- /dev/null
+++ b/tests/bugs/cli/bug-977246.t
@@ -0,0 +1,21 @@
+#! /bin/bash
+
+# This test checks that address validation correctly catches hostnames
+# with consecutive dots, such as 'example..org', as invalid
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume info $V0
+TEST $CLI volume start $V0
+
+TEST ! $CLI volume set $V0 auth.allow example..org
+
+TEST $CLI volume stop $V0
+
+cleanup;
diff --git a/tests/bugs/cli/bug-982174.t b/tests/bugs/cli/bug-982174.t
new file mode 100644
index 00000000000..067e5b97c17
--- /dev/null
+++ b/tests/bugs/cli/bug-982174.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Test CLI validation of diagnostics.client-log-level values
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# Check that incorrect log-level keywords do not crash the CLI
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2
+TEST $CLI volume start $V0
+
+function set_log_level_status {
+    local level=$1
+    $CLI volume set $V0 diagnostics.client-log-level $level 2>&1 |grep -oE 'success|failed'
+}
+
+
+LOG_LEVEL="trace"
+EXPECT "failed" set_log_level_status $LOG_LEVEL
+
+
+LOG_LEVEL="error-gen"
+EXPECT "failed" set_log_level_status $LOG_LEVEL
+
+
+LOG_LEVEL="TRACE"
+EXPECT "success" set_log_level_status $LOG_LEVEL
+
+EXPECT "$LOG_LEVEL" echo `$CLI volume info | grep diagnostics | awk '{print $2}'`
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/cli/bug-983317.t b/tests/bugs/cli/bug-983317.t
new file mode 100644
index 00000000000..11590ac280f
--- /dev/null
+++ b/tests/bugs/cli/bug-983317.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. 
$(dirname $0)/../../volume.rc + +cleanup; +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/$V0 + +# Set a volume option +TEST $CLI volume set $V0 open-behind on +TEST $CLI volume start $V0 + +# Execute volume get without having an explicit option, this should fail +TEST ! $CLI volume get $V0 + +# Execute volume get with an explicit option +TEST $CLI volume get $V0 open-behind + +# Execute volume get with 'all" +TEST $CLI volume get $V0 all + +cleanup; diff --git a/tests/bugs/core/949327.t b/tests/bugs/core/949327.t new file mode 100644 index 00000000000..6b8033a5c85 --- /dev/null +++ b/tests/bugs/core/949327.t @@ -0,0 +1,23 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +function tmp_file_count() +{ + echo $(ls -lh /tmp/tmp.* 2>/dev/null | wc -l) +} + + +old_count=$(tmp_file_count); +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 +TEST $CLI volume start $V0 +new_count=$(tmp_file_count); + +TEST [ "$old_count" -eq "$new_count" ] + +cleanup diff --git a/tests/bugs/core/bug-1110917.t b/tests/bugs/core/bug-1110917.t new file mode 100644 index 00000000000..c4b04fbf2c7 --- /dev/null +++ b/tests/bugs/core/bug-1110917.t @@ -0,0 +1,39 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2; +TEST $CLI volume start $V0; + +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + +TEST $CLI volume set $V0 changelog on +TEST $CLI volume set $V0 changelog.fsync-interval 1 + +# perform I/O on the background +f=$(basename `mktemp -t ${0##*/}.XXXXXX`) +dd if=/dev/urandom of=$M0/$f count=100000 bs=4k & + +# this is the best we can do without inducing _error points_ in the code +# without the patch reconfigre() would hang... +TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1` +TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1` + +TEST $CLI volume set $V0 changelog off +TEST $CLI volume set $V0 changelog on +TEST $CLI volume set $V0 changelog off +TEST $CLI volume set $V0 changelog on + +TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1` +TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1` + +# if there's a deadlock, this would hang +wait; + +cleanup; diff --git a/tests/bugs/core/bug-1111557.t b/tests/bugs/core/bug-1111557.t new file mode 100644 index 00000000000..4ed45761bce --- /dev/null +++ b/tests/bugs/core/bug-1111557.t @@ -0,0 +1,12 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/${V0}{0} +TEST $CLI volume set $V0 diagnostics.brick-log-buf-size 0 +TEST ! $CLI volume set $V0 diagnostics.brick-log-buf-size -0 +cleanup diff --git a/tests/bugs/core/bug-1117951.t b/tests/bugs/core/bug-1117951.t new file mode 100644 index 00000000000..b484fee2fe4 --- /dev/null +++ b/tests/bugs/core/bug-1117951.t @@ -0,0 +1,24 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/brick +EXPECT 'Created' volinfo_field $V0 'Status'; +TEST $CLI volume start $V0 + +# Running with a locale not using '.' 
as decimal separator should work +export LC_NUMERIC=sv_SE.utf8 +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +# As should a locale using '.' as a decimal separator +export LC_NUMERIC=C +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +cleanup diff --git a/tests/bugs/core/bug-1119582.t b/tests/bugs/core/bug-1119582.t new file mode 100644 index 00000000000..c30057c2b2c --- /dev/null +++ b/tests/bugs/core/bug-1119582.t @@ -0,0 +1,24 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../fileio.rc +. $(dirname $0)/../../nfs.rc + +cleanup; + +TEST glusterd; + +TEST pidof glusterd; + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 + +TEST $CLI volume set $V0 features.uss disable; + +TEST killall glusterd; + +rm -f $GLUSTERD_WORKDIR/vols/$V0/snapd.info + +TEST glusterd + +cleanup ; diff --git a/tests/bugs/core/bug-1135514-allow-setxattr-with-null-value.t b/tests/bugs/core/bug-1135514-allow-setxattr-with-null-value.t new file mode 100644 index 00000000000..d26aa561321 --- /dev/null +++ b/tests/bugs/core/bug-1135514-allow-setxattr-with-null-value.t @@ -0,0 +1,18 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +#Test +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/${V0}0 +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +TEST touch $M0/file +TEST setfattr -n user.attribute1 $M0/file +TEST getfattr -n user.attribute1 $M0/file +cleanup + diff --git a/tests/bugs/core/bug-1168803-snapd-option-validation-fix.t b/tests/bugs/core/bug-1168803-snapd-option-validation-fix.t new file mode 100755 index 00000000000..1e52d447507 --- /dev/null +++ b/tests/bugs/core/bug-1168803-snapd-option-validation-fix.t @@ -0,0 +1,30 @@ +#!/bin/bash + +## Test case for BZ-1168803 - snapd option validation should not fail if the +#snapd is not running + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +## Start glusterd +TEST glusterd; +TEST pidof glusterd; + +## create volume +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; +TEST $CLI volume set $V0 features.uss enable + +## Now set another volume option, this should not fail +TEST $CLI volume set $V0 features.file-snapshot on + +## start the volume +TEST $CLI volume start $V0 + +## Kill snapd daemon and then try to stop the volume which should not fail +kill $(ps aux | grep glusterfsd | grep snapd | awk '{print $2}') + +TEST $CLI volume stop $V0 + +cleanup; diff --git a/tests/bugs/core/bug-1168875.t b/tests/bugs/core/bug-1168875.t new file mode 100644 index 00000000000..f6fa9f729c9 --- /dev/null +++ b/tests/bugs/core/bug-1168875.t @@ -0,0 +1,96 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../snapshot.rc +. $(dirname $0)/../../fileio.rc +. $(dirname $0)/../../nfs.rc + +cleanup; + +function check_entry_point_exists () +{ + local entry_point=$1; + local _path=$2; + + ls -a $_path | grep $entry_point; + + if [ $? 
-eq 0 ]; then
+        echo 'Y';
+    else
+        echo 'N';
+    fi
+}
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 --xlator-option *-snapview-client.snapdir-entry-path=/dir $M0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $N0;
+for i in {1..10} ; do echo "file" > $M0/file$i ; done
+
+
+for i in {11..20} ; do echo "file" > $M0/file$i ; done
+
+mkdir $M0/dir;
+
+for i in {1..10} ; do echo "file" > $M0/dir/file$i ; done
+
+mkdir $M0/dir1;
+mkdir $M0/dir2;
+
+for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+TEST $CLI snapshot create snap1 $V0;
+TEST $CLI snapshot activate snap1;
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $M0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $N0/dir
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $M0/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $N0/dir1
+
+TEST $CLI volume set $V0 features.show-snapshot-directory enable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_entry_point_exists ".snaps" $M0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $M0/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir1
+
+TEST $CLI volume set $V0 features.show-snapshot-directory disable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $M0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $M0/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir1
+
+cleanup;
diff --git a/tests/bugs/core/bug-834465.c b/tests/bugs/core/bug-834465.c
new file mode 100644
index 00000000000..61d3deac077
--- /dev/null
+++ b/tests/bugs/core/bug-834465.c
@@ -0,0 +1,61 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+int
+main (int argc, char *argv[])
+{
+        int fd = -1;
+        char *filename = NULL;
+        struct flock lock = {0, };
+        int i = 0;
+        int ret = -1;
+
+        if (argc != 2) {
+                fprintf (stderr, "Usage: %s <filename>\n", argv[0]);
+                goto out;
+        }
+
+        filename = argv[1];
+
+        fd = open (filename, O_RDWR | O_CREAT, 0);
+        if (fd < 0) {
+                fprintf (stderr, "open (%s) failed (%s)\n", filename,
+                         strerror (errno));
+                goto out;
+        }
+
+        lock.l_type = F_WRLCK;
+        lock.l_whence = SEEK_SET;
+        lock.l_start = 1;
+        lock.l_len = 1;
+
+        while (i < 100) {
+                lock.l_type = F_WRLCK;
+                ret = fcntl (fd, F_SETLK, &lock);
+                if (ret < 0) {
+                        fprintf (stderr, "fcntl setlk failed (%s)\n",
+                                 strerror (errno));
+                        goto out;
} + + lock.l_type = F_UNLCK; + ret = fcntl (fd, F_SETLK, &lock); + if (ret < 0) { + fprintf (stderr, "fcntl setlk failed (%s)\n", + strerror (errno)); + goto out; + } + + i++; + } + + ret = 0; + +out: + return ret; +} diff --git a/tests/bugs/core/bug-834465.t b/tests/bugs/core/bug-834465.t new file mode 100755 index 00000000000..5bc52a0957e --- /dev/null +++ b/tests/bugs/core/bug-834465.t @@ -0,0 +1,48 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +MOUNTDIR=$M0; +TEST glusterfs --mem-accounting --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; + +sdump1=$(generate_mount_statedump $V0); +nalloc1=0 +grep -A3 "fuse - usage-type gf_common_mt_fd_lk_ctx_node_t" $sdump1 +if [ $? -eq '0' ] +then + nalloc1=`grep -A3 "fuse - usage-type gf_common_mt_fd_lk_ctx_node_t" $sdump1 | grep num_allocs | cut -d '=' -f2` +fi + +build_tester $(dirname $0)/bug-834465.c + +TEST $(dirname $0)/bug-834465 $M0/testfile + +sdump2=$(generate_mount_statedump $V0); + +# With _gf_free now setting typestr to NULL when num_allocs become 0, it is +# expected that there wouldn't be any entry for gf_common_mt_fd_lk_ctx_node_t +# in the statedump file now + +nalloc2=`grep -A3 "fuse - usage-type gf_common_mt_fd_lk_ctx_node_t" $sdump2 | wc -l` +TEST [ $nalloc1 -eq $nalloc2 ]; + +TEST rm -rf $MOUNTDIR/* +TEST rm -rf $(dirname $0)/bug-834465 +cleanup_mount_statedump $V0 + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR + +cleanup; diff --git a/tests/bugs/core/bug-845213.t b/tests/bugs/core/bug-845213.t new file mode 100644 index 00000000000..136e4126d14 --- /dev/null +++ b/tests/bugs/core/bug-845213.t @@ -0,0 +1,19 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +## Create and start a volume with aio enabled +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; +TEST $CLI volume set $V0 remote-dio enable; +TEST $CLI volume set $V0 network.remote-dio disable; + +cleanup; + diff --git a/tests/bugs/core/bug-903336.t b/tests/bugs/core/bug-903336.t new file mode 100644 index 00000000000..b52c1a4758e --- /dev/null +++ b/tests/bugs/core/bug-903336.t @@ -0,0 +1,13 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/${V0} +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +TEST setfattr -n trusted.io-stats-dump -v /tmp $M0 +cleanup diff --git a/tests/bugs/core/bug-908146.t b/tests/bugs/core/bug-908146.t new file mode 100755 index 00000000000..8b519ff2fff --- /dev/null +++ b/tests/bugs/core/bug-908146.t @@ -0,0 +1,39 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +function get_fd_count { + local vol=$1 + local host=$2 + local brick=$3 + local fname=$4 + local gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brick/$fname)) + local statedump=$(generate_brick_statedump $vol $host $brick) + local count=$(grep "gfid=$gfid_str" $statedump -A2 | grep fd-count | cut -f2 -d'=' | tail -1) + rm -f $statedump + echo $count +} +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/${V0}0 +TEST $CLI volume set $V0 performance.open-behind off +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable + +TEST touch $M0/a + +exec 4>"$M0/a" +exec 5>"$M1/a" +EXPECT "2" get_fd_count $V0 $H0 $B0/${V0}0 a + +exec 4>&- +EXPECT "1" get_fd_count $V0 $H0 $B0/${V0}0 a + +exec 5>&- +EXPECT "0" get_fd_count $V0 $H0 $B0/${V0}0 a + +cleanup diff --git a/tests/bugs/core/bug-913544.t b/tests/bugs/core/bug-913544.t new file mode 100644 index 00000000000..af421722590 --- /dev/null +++ b/tests/bugs/core/bug-913544.t @@ -0,0 +1,24 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +#simulate a split-brain of a file and do truncate. This should not crash the mount point +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 +TEST $CLI volume set $V0 stat-prefetch off +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +cd $M0 +TEST touch a +#simulate no-changelog data split-brain +echo "abc" > $B0/${V0}1/a +echo "abcd" > $B0/${V0}0/a +TEST truncate -s 0 a +TEST ls +cd + +cleanup diff --git a/tests/bugs/core/bug-924075.t b/tests/bugs/core/bug-924075.t new file mode 100755 index 00000000000..61ce0f18286 --- /dev/null +++ b/tests/bugs/core/bug-924075.t @@ -0,0 +1,23 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +#FIXME: there is another patch which moves the following function into +#include.rc +function process_leak_count () +{ + local pid=$1; + return $(ls -lh /proc/$pid/fd | grep "(deleted)" | wc -l) +} + +TEST glusterd; +TEST $CLI volume create $V0 $H0:$B0/${V0}1; +TEST $CLI volume start $V0; +TEST glusterfs -s $H0 --volfile-id $V0 $M0; +mount_pid=$(get_mount_process_pid $V0); +TEST process_leak_count $mount_pid; + +cleanup; diff --git a/tests/bugs/core/bug-927616.t b/tests/bugs/core/bug-927616.t new file mode 100755 index 00000000000..811e88f952f --- /dev/null +++ b/tests/bugs/core/bug-927616.t @@ -0,0 +1,62 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; +TEST $CLI volume set $V0 performance.open-behind off; +TEST $CLI volume start $V0 + +## Mount FUSE with caching disabled +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 nolock; + +TEST mkdir $M0/dir; + +mkdir $M0/other; +cp /etc/passwd $M0/; +cp $M0/passwd $M0/file; +chmod 600 $M0/file; + +chown -R nfsnobody:nfsnobody $M0/dir; + +TEST $CLI volume set $V0 server.root-squash on; + +sleep 1; + +# tests should fail. +touch $M0/foo 2>/dev/null; +TEST [ $? 
-ne 0 ] +touch $N0/foo 2>/dev/null; +TEST [ $? -ne 0 ] +mkdir $M0/new 2>/dev/null; +TEST [ $? -ne 0 ] +mkdir $N0/new 2>/dev/null; +TEST [ $? -ne 0 ] + +TEST $CLI volume set $V0 server.root-squash off; + +sleep 1; + +# tests should pass. +touch $M0/foo 2>/dev/null; +TEST [ $? -eq 0 ] +touch $N0/bar 2>/dev/null; +TEST [ $? -eq 0 ] +mkdir $M0/new 2>/dev/null; +TEST [ $? -eq 0 ] +mkdir $N0/old 2>/dev/null; +TEST [ $? -eq 0 ] + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +TEST $CLI volume stop $V0; +TEST $CLI volume delete $V0; + +cleanup; diff --git a/tests/bugs/core/bug-949242.t b/tests/bugs/core/bug-949242.t new file mode 100644 index 00000000000..5e916cbdbe6 --- /dev/null +++ b/tests/bugs/core/bug-949242.t @@ -0,0 +1,55 @@ +#!/bin/bash +# +# Bug 949242 - Test basic fallocate functionality. +# +# Run several commands to verify basic fallocate functionality. We verify that +# fallocate creates and allocates blocks to a file. We also verify that the keep +# size option does not modify the file size. +### + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../fallocate.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4} +TEST $CLI volume start $V0 + +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + +# check for fallocate support before continuing the test +require_fallocate -l 1m -n $M0/file && rm -f $M0/file + +# fallocate a file and verify blocks are allocated +TEST fallocate -l 1m $M0/file +blksz=`stat -c %b $M0/file` +nblks=`stat -c %B $M0/file` +TEST [ $(($blksz * $nblks)) -eq 1048576 ] + +TEST unlink $M0/file + +# truncate a file to a fixed size, fallocate and verify that the size does not +# change +TEST truncate -s 1M $M0/file +TEST fallocate -l 2m -n $M0/file +blksz=`stat -c %b $M0/file` +nblks=`stat -c %B $M0/file` +sz=`stat -c %s $M0/file` +TEST [ $sz -eq 1048576 ] +# Note that gluster currently incorporates a hack to limit the number of blocks +# reported as allocated to the file by the file size. We have allocated beyond the +# file size here. Just check for non-zero allocation to avoid setting a land mine +# for if/when that behavior might change. +TEST [ ! $(($blksz * $nblks)) -eq 0 ] + +TEST unlink $M0/file + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/core/bug-986429.t b/tests/bugs/core/bug-986429.t new file mode 100644 index 00000000000..e512301775f --- /dev/null +++ b/tests/bugs/core/bug-986429.t @@ -0,0 +1,19 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +## This tests failover achieved by providing multiple +## servers from the trusted pool for fetching volume +## specification + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/$V0 +TEST $CLI volume start $V0 +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s non-existent -s $H0 --volfile-id=/$V0 $M0 + +cleanup; diff --git a/tests/bugs/distribute/bug-1042725.t b/tests/bugs/distribute/bug-1042725.t new file mode 100644 index 00000000000..5497eb8bc00 --- /dev/null +++ b/tests/bugs/distribute/bug-1042725.t @@ -0,0 +1,49 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +#Basic checks +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info + +#Create a distributed volume +TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}; +TEST $CLI volume start $V0 + +# Mount FUSE +TEST glusterfs -s $H0 --volfile-id $V0 $M0 + +#Create files +TEST mkdir $M0/foo +TEST touch $M0/foo/{1..20} +for file in {1..20}; do + ln $M0/foo/$file $M0/foo/${file}_linkfile; +done + +#Stop one of the brick +TEST kill_brick ${V0} ${H0} ${B0}/${V0}1 + +rm -rf $M0/foo 2>/dev/null +TEST stat $M0/foo + +touch $M0/foo/{1..20} 2>/dev/null +touch $M0/foo/{1..20}_linkfile 2>/dev/null + +TEST $CLI volume start $V0 force; +sleep 5 +function verify_duplicate { + count=`ls $M0/foo | sort | uniq --repeated | grep [0-9] -c` + echo $count +} +EXPECT 0 verify_duplicate + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/distribute/bug-1063230.t b/tests/bugs/distribute/bug-1063230.t new file mode 100755 index 00000000000..944c6c296f6 --- /dev/null +++ b/tests/bugs/distribute/bug-1063230.t @@ -0,0 +1,29 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/brick0 $H0:$B0/brick1 +TEST $CLI volume start $V0 + +sleep 5 + +TEST glusterfs -s $H0 --volfile-id $V0 $M0 + +var=`gluster volume rebalance $V0 start force` + +EXPECT "0" echo $? + +var1="volume rebalance: $V0: success: Rebalance on $V0 has \ +been started successfully. Use rebalance status command to \ +check status of the rebalance process." + +echo $var | grep "$var1" + +EXPECT "0" echo $? + +cleanup diff --git a/tests/bugs/distribute/bug-1066798.t b/tests/bugs/distribute/bug-1066798.t new file mode 100755 index 00000000000..e53e1aebf2b --- /dev/null +++ b/tests/bugs/distribute/bug-1066798.t @@ -0,0 +1,86 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TESTS_EXPECTED_IN_LOOP=200 + +## Start glusterd +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +## Lets create volume +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; +TEST glusterfs -s $H0 --volfile-id=$V0 $M0 + +############################################################ +#TEST_PLAN# +#Create a file +#Store the hashed brick information +#Create hard links to it +#Remove the hashed brick +#Check now all the hardlinks are migrated in to "OTHERBRICK" +#Check also in mount point for all the files +#check there is no failures and skips for migration +############################################################ + +TEST touch $M0/file1; + +file_perm=`ls -l $M0/file1 | grep file1 | awk '{print $1}'`; + +if [ -f $B0/${V0}1/file1 ] +then + HASHED=$B0/${V0}1 + OTHER=$B0/${V0}2 +else + HASHED=$B0/${V0}2 + OTHER=$B0/${V0}1 +fi + +#create hundred hard links +for i in {1..50}; +do +TEST_IN_LOOP ln $M0/file1 $M0/link$i; +done + + +TEST $CLI volume remove-brick $V0 $H0:${HASHED} start +EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:${HASHED}"; + +#check consistency in mount point +#And also check all the links are migrated to OTHER +for i in {1..50} +do +TEST_IN_LOOP [ -f ${OTHER}/link${i} ]; +TEST_IN_LOOP [ -f ${M0}/link${i} ]; +done; + +#check in OTHER that all the files has proper permission (Means no +#linkto files) + +for i in {1..50} +do +link_perm=`ls -l $OTHER | grep -w link${i} | awk '{print $1}'`; +TEST_IN_LOOP [ "${file_perm}" == "${link_perm}" ] + +done + +#check that remove-brick status should not have any failed or skipped files + +var=`$CLI volume remove-brick $V0 $H0:${HASHED} status | grep completed` + +TEST [ `echo $var | awk '{print $5}'` = "0" ] +TEST [ `echo $var | awk '{print $6}'` = "0" ] + +cleanup diff --git a/tests/bugs/distribute/bug-1086228.t b/tests/bugs/distribute/bug-1086228.t new file mode 100755 index 00000000000..e14ea572b61 --- /dev/null +++ b/tests/bugs/distribute/bug-1086228.t @@ -0,0 +1,34 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../fileio.rc +. $(dirname $0)/../../dht.rc +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2} +TEST $CLI volume start $V0; +TEST glusterfs --direct-io-mode=yes --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +echo "D" > $M0/file1; +TEST chmod +st $M0/file1; + +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}"3" +TEST $CLI volume rebalance $V0 start force + +EXPECT_WITHIN "10" "0" rebalance_completed +count=0 +for i in `ls $B0/$V0"3"`; + do + var=`stat -c %A $B0/$V0"3"/$i | cut -c 4`; + echo $B0/$V0"3"/$i $var + if [ "$var" != "S" ]; then + count=$((count + 1)) + fi + done + +TEST [[ $count == 0 ]] +cleanup diff --git a/tests/bugs/distribute/bug-1088231.t b/tests/bugs/distribute/bug-1088231.t new file mode 100755 index 00000000000..89823ee0800 --- /dev/null +++ b/tests/bugs/distribute/bug-1088231.t @@ -0,0 +1,161 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../fileio.rc +. 
$(dirname $0)/../../dht.rc
+
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.randomize-hash-range-by-gfid on
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --aux-gfid-mount --volfile-server=$H0 $M0
+TEST mkdir $M0/a
+
+
+## Bug Description: In the dht_discover code path, which is triggered when
+## the lookup performed is a nameless lookup, even if the end of the lookup
+## finds that a self-heal is needed to fix the layout, it won't heal, because
+## the healing code path is not invoked for nameless lookups.
+
+## What to test: With the patch, even for a nameless lookup, a layout that
+## needs fixing will be fixed wherever the lookup is successful, and no
+## directory will be created on subvols that return ENOENT, since this is a
+## nameless lookup.
+
+gfid_with_hyphen=`getfattr -n glusterfs.gfid.string $M0/a 2>/dev/null \
+                 | grep glusterfs.gfid.string | cut -d '"' -f 2`
+
+TEST setfattr -x trusted.glusterfs.dht $B0/$V0"0"/a
+
+TEST stat $M0/.gfid/$gfid_with_hyphen
+
+## Assuming that we have two bricks, we can have two permutations of layout
+## Case 1: Brick - A      Brick - B
+##         0 - 50         51 - 100
+##
+## Case 2: Brick - A      Brick - B
+##         51 - 100       0 - 50
+##
+## To ensure the layout is assigned properly, the following tests should be
+## performed.
+##
+## Case 1: Layout_b0_s = 0; Layout_b0_e = 50, Layout_b1_s = 51,
+##         Layout_b1_e = 100;
+##
+##         layout_b1_s = layout_b0_e + 1;
+##         layout_b0_s = layout_b1_e + 1; but b0_s is 0, so change to 101,
+##         then compare.
+## Case 2: Layout_b0_s = 51, Layout_b0_e = 100, Layout_b1_s = 0,
+##         Layout_b1_e = 50
+##
+##         layout_b0_s = Layout_b1_e + 1;
+##         layout_b1_s = Layout_b0_e + 1; but b1_s is 0, so change to 101.
+
+
+## Extract the layout
+layout_b0_s=`get_layout $B0/$V0"0"/a | cut -c19-26`
+layout_b0_e=`get_layout $B0/$V0"0"/a | cut -c27-34`
+layout_b1_s=`get_layout $B0/$V0"1"/a | cut -c19-26`
+layout_b1_e=`get_layout $B0/$V0"1"/a | cut -c27-34`
+
+
+## Add a 0x prefix to perform hex arithmetic
+layout_b0_s="0x"$layout_b0_s
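+# (Worked example with hypothetical values, purely to illustrate the healing
+# check further below: if brick0 owned 0x00000000-0x7fffffff and brick1 owned
+# 0x80000000-0xffffffff, then comp1 = 0 is promoted to 4294967296 (2^32),
+# diff2 = 0xffffffff + 1 = 4294967296 equals comp1, and diff1 = 0x7fffffff + 1
+# = 0x80000000 equals comp2, so exactly one check matches and healed becomes 1.)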
+layout_b0_e="0x"$layout_b0_e +layout_b1_s="0x"$layout_b1_s +layout_b1_e="0x"$layout_b1_e + + + +## Logic of converting starting layout "0" to "Max_value of layout + 1" +comp1=$(($layout_b0_s + 0)) +if [ "$comp1" == "0" ];then + comp1=4294967296 +fi + +comp2=$(($layout_b1_s + 0)) +if [ "$comp2" == "0" ];then + comp2=4294967296 +fi + +diff1=$(($layout_b0_e + 1)) +diff2=$(($layout_b1_e + 1)) + + +healed=0 + +if [ "$comp1" == "$diff1" ] && [ "$comp2" == "$diff2" ]; then + healed=$(($healed + 1)) +fi + +if [ "$comp1" == "$diff2" ] && [ "$comp2" == "$diff1" ]; then + healed=$(($healed + 1)) +fi + +TEST [ $healed == 1 ] +cleanup + diff --git a/tests/bugs/distribute/bug-1099890.t b/tests/bugs/distribute/bug-1099890.t new file mode 100644 index 00000000000..4a4450166e9 --- /dev/null +++ b/tests/bugs/distribute/bug-1099890.t @@ -0,0 +1,125 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../dht.rc + +## TO-DO: Fix the following once the dht du refresh interval issue is fixed: +## 1. Do away with sleep(1). +## 2. Do away with creation of empty files. + +cleanup; + +TEST glusterd; +TEST pidof glusterd; + +# Create 2 loop devices, one per brick. +TEST truncate -s 100M $B0/brick1 +TEST truncate -s 100M $B0/brick2 + +TEST L1=`SETUP_LOOP $B0/brick1` +TEST MKFS_LOOP $L1 + +TEST L2=`SETUP_LOOP $B0/brick2` +TEST MKFS_LOOP $L2 + +TEST mkdir -p $B0/${V0}{1,2} + +TEST MOUNT_LOOP $L1 $B0/${V0}1 +TEST MOUNT_LOOP $L2 $B0/${V0}2 + +# Create a plain distribute volume with 2 subvols. +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; + +TEST $CLI volume start $V0; +EXPECT "Started" volinfo_field $V0 'Status'; + +TEST $CLI volume quota $V0 enable; + +TEST $CLI volume set $V0 features.quota-deem-statfs on + +TEST $CLI volume quota $V0 limit-usage / 150MB; + +TEST $CLI volume set $V0 cluster.min-free-disk 50% + +TEST glusterfs -s $H0 --volfile-id=$V0 $M0 + +# Make sure quota-deem-statfs is working as expected +EXPECT "150M" echo `df -h $M0 -P | tail -1 | awk {'print $2'}` + +# Create a new file 'foo' under the root of the volume, which hashes to subvol-0 +# of DHT, that consumes 40M +TEST dd if=/dev/zero of=$M0/foo bs=5120k count=8 + +TEST stat $B0/${V0}1/foo +TEST ! stat $B0/${V0}2/foo + +# Create a new file 'bar' under the root of the volume, which hashes to subvol-1 +# of DHT, that consumes 40M +TEST dd if=/dev/zero of=$M0/bar bs=5120k count=8 + +TEST ! stat $B0/${V0}1/bar +TEST stat $B0/${V0}2/bar + +# Touch a zero-byte file on the root of the volume to make sure the statfs data +# on DHT is refreshed +sleep 1; +TEST touch $M0/empty1; + +# At this point, the available space on each subvol {60M,60M} is greater than +# their min-free-disk {50M,50M}, but if this bug still exists, then +# the total available space on the volume as perceived by DHT should be less +# than min-free-disk, i.e., +# +# consumed space returned per subvol by quota = (40M + 40M) = 80M +# +# Therefore, consumed space per subvol computed by DHT WITHOUT the fix would be: +# (80M/150M)*100 = 53% +# +# Available space per subvol as perceived by DHT with the bug = 47% +# which is less than min-free-disk + +# Now I create a file that hashes to subvol-1 (counting from 0) of DHT. +# If this bug still exists,then DHT should be routing this creation to subvol-0. +# If this bug is fixed, then DHT should be routing the creation to subvol-1 only +# as it has more than min-free-disk space available. + +TEST dd if=/dev/zero of=$M0/file bs=1k count=1 +sleep 1; +TEST ! 
stat $B0/${V0}1/file +TEST stat $B0/${V0}2/file + +# Touch another zero-byte file on the root of the volume to refresh statfs +# values stored by DHT. + +TEST touch $M0/empty2; + +# Now I create a new file that hashes to subvol-0, at the end of which, there +# will be less than min-free-disk space available on it. +TEST dd if=/dev/zero of=$M0/fil bs=5120k count=4 +sleep 1; +TEST stat $B0/${V0}1/fil +TEST ! stat $B0/${V0}2/fil + +# Touch to refresh statfs info cached by DHT + +TEST touch $M0/empty3; + +# Now I create a file that hashes to subvol-0 but since it has less than +# min-free-disk space available, its data will be cached on subvol-1. + +TEST dd if=/dev/zero of=$M0/zz bs=5120k count=1 + +TEST stat $B0/${V0}1/zz +TEST stat $B0/${V0}2/zz + +EXPECT "$V0-client-1" dht_get_linkto_target "$B0/${V0}1/zz" + +EXPECT "1" is_dht_linkfile "$B0/${V0}1/zz" + +force_umount $M0 +$CLI volume stop $V0 +UMOUNT_LOOP ${B0}/${V0}{1,2} +rm -f ${B0}/brick{1,2} + +cleanup diff --git a/tests/bugs/distribute/bug-1117851.t b/tests/bugs/distribute/bug-1117851.t new file mode 100755 index 00000000000..c93a05e1d47 --- /dev/null +++ b/tests/bugs/distribute/bug-1117851.t @@ -0,0 +1,95 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +create_files () { + for i in {1..1000}; do + orig=$(printf %s/abc%04d $1 $i) + real=$(printf %s/src%04d $1 $i) + # Make sure lots of these have linkfiles. + echo "This is file $i" > $orig + mv $orig $real + done + sync +} + +move_files_inner () { + sfile=$M0/status_$(basename $1) + echo "running" > $sfile + for i in {1..1000}; do + src=$(printf %s/src%04d $1 $i) + dst=$(printf %s/dst%04d $1 $i) + mv $src $dst 2> /dev/null + done + echo "done" > $sfile +} + +move_files () { + move_files_inner $* & +} + +check_files () { + errors=0 + for i in {1..1000}; do + if [ ! -f $(printf %s/dst%04d $1 $i) ]; then + if [ -f $(printf %s/src%04d $1 $i) ]; then + echo "file $i didnt get moved" > /dev/stderr + else + echo "file $i is MISSING" > /dev/stderr + errors=$((errors+1)) + fi + fi + done + if [ $((errors)) != 0 ]; then + : ls -l $1 > /dev/stderr + fi + return $errors +} + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6}; + +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT '6' brick_count $V0 + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Mount FUSE with caching disabled (read-write) +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +TEST create_files $M0 + +## Mount FUSE with caching disabled (read-write) again +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M1; + +TEST move_files $M0 +TEST move_files $M1 + +# It's regrettable that renaming 1000 files might take more than 30 seconds, +# but on our test systems sometimes it does, so double the time from what we'd +# use otherwise. There still seem to be some spurious failures, 1 in 20 when +# this does not complete, added an additional 15 seconds to take false reports +# out of the system, during test runs. +EXPECT_WITHIN 75 "done" cat $M0/status_0 +EXPECT_WITHIN 75 "done" cat $M1/status_1 + +TEST umount $M0 +TEST umount $M1 +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; +TEST check_files $M0 + +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! 
$CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/distribute/bug-1122443.t b/tests/bugs/distribute/bug-1122443.t new file mode 100644 index 00000000000..3e2455e6382 --- /dev/null +++ b/tests/bugs/distribute/bug-1122443.t @@ -0,0 +1,59 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../dht.rc + +make_files() { + mkdir $1 && \ + ln -s ../ $1/symlink && \ + mknod $1/special_b b 1 2 && \ + mknod $1/special_c c 3 4 && \ + mknod $1/special_u u 5 6 && \ + mknod $1/special_p p && \ + touch -h --date=@1 $1/symlink && \ + touch -h --date=@2 $1/special_b && + touch -h --date=@3 $1/special_c && + touch -h --date=@4 $1/special_u && + touch -h --date=@5 $1/special_p +} + +bug_1113050_workaround() { + # Test if graph change has settled (bug-1113050?) + test=$(stat -c "%n:%Y" $1 2>&1 | tr '\n' ',') + if [ $? -eq 0 ] ; then + echo RECONNECTED + else + echo WAITING + fi + return 0 +} + +cleanup + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 +TEST $CLI volume start $V0 + +# Mount FUSE and create symlink +TEST glusterfs -s $H0 --volfile-id $V0 $M0 +TEST make_files $M0/subdir + +# Get mtime before migration +BEFORE="$(stat -c %n:%Y $M0/subdir/* | tr '\n' ',')" + +# Migrate brick +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1 +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}0 start +EXPECT_WITHIN $REBALANCE_TIMEOUT "0" remove_brick_completed +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}0 commit + +# Get mtime after migration +EXPECT_WITHIN 5 RECONNECTED bug_1113050_workaround $M0/subdir/* +AFTER="$(stat -c %n:%Y $M0/subdir/* | tr '\n' ',')" + +# Check if mtime is unchanged +TEST [ "$AFTER" == "$BEFORE" ] + +cleanup diff --git a/tests/bugs/distribute/bug-1125824.t b/tests/bugs/distribute/bug-1125824.t new file mode 100755 index 00000000000..a944b360db3 --- /dev/null +++ b/tests/bugs/distribute/bug-1125824.t @@ -0,0 +1,100 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../nfs.rc + +create_files () { + for i in {1..10}; do + orig=$(printf %s/file%04d $1 $i) + echo "This is file $i" > $orig + done + for i in {1..10}; do + mkdir $(printf %s/dir%04d $1 $i) + done + sync +} + +create_dirs () { + for i in {1..10}; do + mkdir $(printf %s/dir%04d $1 $i) + create_files $(printf %s/dir%04d $1 $i) + done + sync +} + +stat_files () { + for i in {1..10}; do + orig=$(printf %s/file%04d $1 $i) + stat $orig + done + for i in {1..10}; do + stat $(printf %s/dir%04d $1 $i) + done + sync +} + +stat_dirs () { + for i in {1..10}; do + stat $(printf %s/dir%04d $1 $i) + stat_files $(printf %s/dir%04d $1 $i) + done + sync +} + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; + +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT '4' brick_count $V0 + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 + +# Create and poulate the NFS inode tables +TEST create_dirs $N0 +TEST stat_dirs $N0 + +# add-bricks changing the state of the volume where some bricks +# would have some directories and others would not +TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}{5,6,7,8} + +# Post this dht_access was creating a mess for directories which is fixed +# with this commit. 
The issues could range from getting ENOENT or +# ESTALE or entries missing to directories not having complete +# layouts. +TEST cd $N0 +TEST ls -lR + +TEST $CLI volume rebalance $V0 start force +EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 + +# tests to check post rebalance if layouts and entires are fine and +# accessible by NFS to clear the volume +TEST ls -lR +rm -rf ./* +# There are additional bugs where NFS+DHT does not delete all entries +# on an rm -rf, so we do an additional rm -rf to ensure all is done +# and we are facing this transient issue, rather than a bad directory +# layout that is cached in memory +TEST rm -rf ./* + +# Get out of the mount, so that umount can work +TEST cd / + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/distribute/bug-1161156.t b/tests/bugs/distribute/bug-1161156.t new file mode 100755 index 00000000000..dfaea90b414 --- /dev/null +++ b/tests/bugs/distribute/bug-1161156.t @@ -0,0 +1,54 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc + +function usage() +{ + local QUOTA_PATH=$1; + $CLI volume quota $V0 list $QUOTA_PATH | \ + grep "$QUOTA_PATH" | awk '{print $4}' +} + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6}; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +# Testing with NFS for no particular reason +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available +TEST mount_nfs $H0:/$V0 $N0 +mydir="dir" +TEST mkdir -p $N0/$mydir +TEST mkdir -p $N0/newdir + +TEST dd if=/dev/zero of=$N0/$mydir/file bs=1k count=10240 + +TEST $CLI volume quota $V0 enable +TEST $CLI volume quota $V0 limit-usage / 20MB +TEST $CLI volume quota $V0 limit-usage /newdir 5MB +TEST $CLI volume quota $V0 soft-timeout 0 +TEST $CLI volume quota $V0 hard-timeout 0 + +TEST dd if=/dev/zero of=$N0/$mydir/newfile_1 bs=512 count=10240 +# wait for write behind to complete. +EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "15.0MB" usage "/" +TEST ! dd if=/dev/zero of=$N0/$mydir/newfile_2 bs=1k count=10240 + +# Test rename within a directory. It should pass even when the +# corresponding directory quota is filled. +TEST mv $N0/dir/file $N0/dir/newfile_3 + +# rename should fail here with disk quota exceeded +TEST ! mv $N0/dir/newfile_3 $N0/newdir/ + +# cleanup +umount_nfs $N0 +cleanup; diff --git a/tests/bugs/distribute/bug-853258.t b/tests/bugs/distribute/bug-853258.t new file mode 100755 index 00000000000..b2d7f2b771a --- /dev/null +++ b/tests/bugs/distribute/bug-853258.t @@ -0,0 +1,45 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +mkdir -p $H0:$B0/${V0}0 +mkdir -p $H0:$B0/${V0}1 +mkdir -p $H0:$B0/${V0}2 +mkdir -p $H0:$B0/${V0}3 + +# Create and start a volume. +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 +TEST $CLI volume start $V0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; + +# Force assignment of initial ranges. +TEST $CLI volume rebalance $V0 fix-layout start +EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0 + +# Get the original values. 
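+# (Note: dht_get_layout -- presumably defined in one of the sourced helper
+# libraries -- prints the hex-encoded trusted.glusterfs.dht xattr of a brick
+# directory, which carries that brick's hash range. The values collected here
+# and again after the add-brick are fed to overlap.py below, which appears to
+# report the portion of the old ranges preserved by the second fix-layout as a
+# fraction of 2^32, hence the 0xaaaaaaab threshold at the end.)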
+xattrs="" +for i in $(seq 0 2); do + xattrs="$xattrs $(dht_get_layout $B0/${V0}$i)" +done + +# Expand the volume and force assignment of new ranges. +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3 +# Force assignment of initial ranges. +TEST $CLI volume rebalance $V0 fix-layout start +EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0 + +for i in $(seq 0 3); do + xattrs="$xattrs $(dht_get_layout $B0/${V0}$i)" +done + +overlap=$( $PYTHON $(dirname $0)/overlap.py $xattrs) +# 2863311531 = 0xaaaaaaab = 2/3 overlap +TEST [ "$overlap" -ge 2863311531 ] + +cleanup diff --git a/tests/bugs/distribute/bug-860663.c b/tests/bugs/distribute/bug-860663.c new file mode 100644 index 00000000000..bee4e7d40b1 --- /dev/null +++ b/tests/bugs/distribute/bug-860663.c @@ -0,0 +1,43 @@ +#include +#include +#include +#include +#include +#include +#include + +int +main(argc, argv) + int argc; + char **argv; +{ + char *basepath; + char path[MAXPATHLEN + 1]; + unsigned int count; + int i, fd; + + if (argc != 3) + errx(1, "usage: %s path count", argv[0]); + + basepath = argv[1]; + count = atoi(argv[2]); + + if (count > 999999) + errx(1, "count too big"); + + if (strlen(basepath) > MAXPATHLEN - 6) + errx(1, "path too long"); + + for (i = 0; i < count; i++) { + (void)sprintf(path, "%s%06d", basepath, i); + + fd = open(path, O_CREAT|O_RDWR, 0644); + if (fd == -1) + err(1, "create %s failed", path); + + if (close(fd) != 0) + warn("close %s failed", path); + } + + return 0; +} diff --git a/tests/bugs/distribute/bug-860663.t b/tests/bugs/distribute/bug-860663.t new file mode 100644 index 00000000000..f250a736e41 --- /dev/null +++ b/tests/bugs/distribute/bug-860663.t @@ -0,0 +1,54 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +function file_count() +{ + val=1 + + if [ "$1" == "$2" ] + then + val=0 + fi + echo $val +} + +BRICK_COUNT=3 + +build_tester $(dirname $0)/bug-860663.c + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 +TEST $CLI volume start $V0 + +## Mount FUSE +TEST glusterfs -s $H0 --volfile-id $V0 $M0; + +TEST $(dirname $0)/bug-860663 $M0/files 10000 + +ORIG_FILE_COUNT=`ls -l $M0 | wc -l`; +TEST [ $ORIG_FILE_COUNT -ge 10000 ] + +# Kill a brick process +kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`; + +TEST $CLI volume rebalance $V0 fix-layout start + +sleep 30; + +TEST ! $(dirname $0)/bug-860663 $M0/files 10000 + +TEST $CLI volume start $V0 force + +sleep 5; + +NEW_FILE_COUNT=`ls -l $M0 | wc -l`; + +EXPECT "0" file_count $ORIG_FILE_COUNT $NEW_FILE_COUNT + +rm -f $(dirname $0)/bug-860663 +cleanup; diff --git a/tests/bugs/distribute/bug-862967.t b/tests/bugs/distribute/bug-862967.t new file mode 100644 index 00000000000..07b053787ba --- /dev/null +++ b/tests/bugs/distribute/bug-862967.t @@ -0,0 +1,59 @@ +#!/bin/bash + +. 
$(dirname $0)/../../include.rc + +cleanup; + +function uid_gid_compare() +{ + val=1 + + if [ "$1" == "$3" ] + then + if [ "$2" == "$4" ] + then + val=0 + fi + fi + echo "$val" +} + +BRICK_COUNT=3 + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 +TEST $CLI volume set $V0 stat-prefetch off +TEST $CLI volume start $V0 + +## Mount FUSE +TEST glusterfs --attribute-timeout=0 --entry-timeout=0 --gid-timeout=-1 -s $H0 --volfile-id $V0 $M0; + +# change dir permissions +mkdir $M0/dir; +chown 1:1 $M0/dir; + +# Kill a brick process + +kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`; +# change dir ownership +NEW_UID=36; +NEW_GID=36; +chown $NEW_UID:$NEW_GID $M0/dir; + +# bring the brick back up +TEST $CLI volume start $V0 force + +sleep 10; + +ls -l $M0/dir; + +# check if uid/gid is healed on backend brick which was taken down +BACKEND_UID=`stat -c %u $B0/${V0}1/dir`; +BACKEND_GID=`stat -c %g $B0/${V0}1/dir`; + + +EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID + +cleanup; diff --git a/tests/bugs/distribute/bug-882278.t b/tests/bugs/distribute/bug-882278.t new file mode 100755 index 00000000000..8cb51474720 --- /dev/null +++ b/tests/bugs/distribute/bug-882278.t @@ -0,0 +1,73 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +cleanup + +# Is there a good reason to require --fqdn elsewhere? It's worse than useless +# here. +H0=$(hostname -s) + +function recreate { + # The rm is necessary so we don't get fooled by leftovers from old runs. + rm -rf $1 && mkdir -p $1 +} + +function count_lines { + grep "$1" $2/* | wc -l +} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +## Start and create a volume +TEST recreate ${B0}/${V0}-0 +TEST recreate ${B0}/${V0}-1 +TEST $CLI volume create $V0 $H0:$B0/${V0}-{0,1} +TEST $CLI volume set $V0 cluster.nufa on + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Mount native +special_option="--xlator-option ${V0}-dht.local-volume-name=${V0}-client-1" +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $special_option $M0 + +## Create a bunch of test files. +for i in $(seq 0 99); do + echo hello > $(printf $M0/file%02d $i) +done + +## Make sure the files went to the right place. There might be link files in +## the other brick, but they won't have any contents. +EXPECT "0" count_lines hello ${B0}/${V0}-0 +EXPECT "100" count_lines hello ${B0}/${V0}-1 + +if [ "$EXIT_EARLY" = "1" ]; then + exit 0; +fi + +## Finish up +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/distribute/bug-884455.t b/tests/bugs/distribute/bug-884455.t new file mode 100755 index 00000000000..59413cd5408 --- /dev/null +++ b/tests/bugs/distribute/bug-884455.t @@ -0,0 +1,84 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../dht.rc + +cleanup; + +function layout_compare() +{ + res=0 + + if [ "$1" == "$2" ] + then + res=1 + fi + if [ "$1" == "$3" ] + then + res=1 + fi + if [ "$2" == "$3" ] + then + res=1 + fi + + echo $res +} + +function get_layout() +{ + layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1|grep dht |cut -d = -f2` + layout2=`getfattr -n trusted.glusterfs.dht -e hex $2 2>&1|grep dht |cut -d = -f2` + layout3=`getfattr -n trusted.glusterfs.dht -e hex $3 2>&1|grep dht |cut -d = -f2` + + ret=$(layout_compare $layout1 $layout2 $layout3) + + if [ $ret -ne 0 ] + then + echo 1 + else + echo 0 + fi + +} + +BRICK_COUNT=3 + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 +## set subvols-per-dir option +TEST $CLI volume set $V0 subvols-per-directory 2 +TEST $CLI volume start $V0 + +## Mount FUSE +TEST glusterfs -s $H0 --volfile-id $V0 $M0; + +TEST mkdir $M0/dir{1..10} 2>/dev/null; + +## Add-brick n run rebalance to force re-write of layout +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2 +sleep 5; + +## trigger dir self heal on client +TEST ls -l $M0 2>/dev/null; + +TEST $CLI volume rebalance $V0 start force + +EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed + +## check for layout overlaps. +EXPECT "0" get_layout $B0/${V0}0 $B0/${V0}1 $B0/${V0}2 +EXPECT "0" get_layout $B0/${V0}0/dir1 $B0/${V0}1/dir1 $B0/${V0}2/dir1 +EXPECT "0" get_layout $B0/${V0}0/dir2 $B0/${V0}1/dir2 $B0/${V0}2/dir2 +EXPECT "0" get_layout $B0/${V0}0/dir3 $B0/${V0}1/dir3 $B0/${V0}2/dir3 +EXPECT "0" get_layout $B0/${V0}0/dir4 $B0/${V0}1/dir4 $B0/${V0}2/dir4 +EXPECT "0" get_layout $B0/${V0}0/dir5 $B0/${V0}1/dir5 $B0/${V0}2/dir5 +EXPECT "0" get_layout $B0/${V0}0/dir6 $B0/${V0}1/dir6 $B0/${V0}2/dir6 +EXPECT "0" get_layout $B0/${V0}0/dir7 $B0/${V0}1/dir7 $B0/${V0}2/dir7 +EXPECT "0" get_layout $B0/${V0}0/dir8 $B0/${V0}1/dir8 $B0/${V0}2/dir8 +EXPECT "0" get_layout $B0/${V0}0/dir9 $B0/${V0}1/dir9 $B0/${V0}2/dir9 +EXPECT "0" get_layout $B0/${V0}0/dir10 $B0/${V0}1/dir10 $B0/${V0}2/dir10 + +cleanup; diff --git a/tests/bugs/distribute/bug-884597.t b/tests/bugs/distribute/bug-884597.t new file mode 100755 index 00000000000..d6a2c65f370 --- /dev/null +++ b/tests/bugs/distribute/bug-884597.t @@ -0,0 +1,173 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../dht.rc +. $(dirname $0)/../../volume.rc + +cleanup; +BRICK_COUNT=3 +function uid_gid_compare() +{ + val=1 + + if [ "$1" == "$3" ] + then + if [ "$2" == "$4" ] + then + val=0 + fi + fi + echo "$val" +} + + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 +TEST $CLI volume start $V0 + +## Mount FUSE +TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0; + +i=1 +NEW_UID=36 +NEW_GID=36 + +TEST touch $M0/$i + +chown $NEW_UID:$NEW_GID $M0/$i +## rename till file gets a linkfile + +has_link=0 +while [ $i -lt 100 ] +do + mv $M0/$i $M0/$(( $i+1 )) + if [ $? -ne 0 ] + then + break + fi + let i++ + file_has_linkfile $i + has_link=$? + if [ $has_link -eq 2 ] + then + break; + fi +done + +TEST [ $has_link -eq 2 ] + +get_hashed_brick $i +cached=$? + +# check if uid/gid on linkfile is created with correct uid/gid +BACKEND_UID=`stat -c %u $B0/${V0}$cached/$i`; +BACKEND_GID=`stat -c %g $B0/${V0}$cached/$i`; + +EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID + +# remove linkfile from backend, and trigger a lookup heal. 
uid/gid should match +rm -rf $B0/${V0}$cached/$i + +# without a unmount, we are not able to trigger a lookup based heal + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +## Mount FUSE +TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0; + +lookup=`ls -l $M0/$i 2>/dev/null` + +# check if uid/gid on linkfile is created with correct uid/gid +BACKEND_UID=`stat -c %u $B0/${V0}$cached/$i`; +BACKEND_GID=`stat -c %g $B0/${V0}$cached/$i`; + +EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID +# create hardlinks. Make sure a linkfile gets created + +i=1 +NEW_UID=36 +NEW_GID=36 + +TEST touch $M0/file +chown $NEW_UID:$NEW_GID $M0/file; + +## ln till file gets a linkfile + +has_link=0 +while [ $i -lt 100 ] +do + ln $M0/file $M0/link$i + if [ $? -ne 0 ] + then + break + fi + file_has_linkfile link$i + has_link=$? + if [ $has_link -eq 2 ] + then + break; + fi + let i++ +done + +TEST [ $has_link -eq 2 ] + +get_hashed_brick link$i +cached=$? + +# check if uid/gid on linkfile is created with correct uid/gid +BACKEND_UID=`stat -c %u $B0/${V0}$cached/link$i`; +BACKEND_GID=`stat -c %g $B0/${V0}$cached/link$i`; + +EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID + +## UID/GID creation as different user +i=1 +NEW_UID=36 +NEW_GID=36 + +TEST touch $M0/user_file1 +TEST chown $NEW_UID:$NEW_GID $M0/user_file1; + +## Give permission on volume, so that different users can perform rename + +TEST chmod 0777 $M0 + +## Add a user known as ABC and perform renames +TEST `useradd -M ABC 2>/dev/null` + +TEST cd $M0 +## rename as different user till file gets a linkfile + +has_link=0 +while [ $i -lt 100 ] +do + su -m ABC -c "mv $M0/user_file$i $M0/user_file$(( $i+1 ))" + if [ $? -ne 0 ] + then + break + fi + let i++ + file_has_linkfile user_file$i + has_link=$? + if [ $has_link -eq 2 ] + then + break; + fi +done + +TEST [ $has_link -eq 2 ] + +## del user ABC +TEST userdel ABC + +get_hashed_brick user_file$i +cached=$? + +# check if uid/gid on linkfile is created with correct uid/gid +BACKEND_UID=`stat -c %u $B0/${V0}$cached/user_file$i`; +BACKEND_GID=`stat -c %g $B0/${V0}$cached/user_file$i`; + +EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID +cleanup; diff --git a/tests/bugs/distribute/bug-907072.t b/tests/bugs/distribute/bug-907072.t new file mode 100755 index 00000000000..1e8bd280f32 --- /dev/null +++ b/tests/bugs/distribute/bug-907072.t @@ -0,0 +1,47 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../fileio.rc +. $(dirname $0)/../../dht.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd; +TEST pidof glusterd; + +TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3}; +TEST $CLI volume start $V0; + +TEST glusterfs -s $H0 --volfile-id $V0 $M0; + +TEST mkdir $M0/test; + +OLD_LAYOUT0=`get_layout $B0/${V0}0/test`; +OLD_LAYOUT1=`get_layout $B0/${V0}1/test`; +OLD_LAYOUT2=`get_layout $B0/${V0}2/test`; +OLD_LAYOUT3=`get_layout $B0/${V0}3/test`; + +TEST killall glusterfsd; + +# Delete directory on one brick +TEST rm -rf $B0/${V}1/test; + +# And only layout xattr on another brick +TEST setfattr -x trusted.glusterfs.dht $B0/${V0}2/test; + +TEST $CLI volume start $V0 force; + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST glusterfs -s $H0 --volfile-id $V0 $M0; +TEST stat $M0/test; + +NEW_LAYOUT0=`get_layout $B0/${V0}0/test`; +NEW_LAYOUT1=`get_layout $B0/${V0}1/test`; +NEW_LAYOUT2=`get_layout $B0/${V0}2/test`; +NEW_LAYOUT3=`get_layout $B0/${V0}3/test`; + +EXPECT $OLD_LAYOUT0 echo $NEW_LAYOUT0; +EXPECT $OLD_LAYOUT1 echo $NEW_LAYOUT1; +EXPECT $OLD_LAYOUT2 echo $NEW_LAYOUT2; +EXPECT $OLD_LAYOUT3 echo $NEW_LAYOUT3; diff --git a/tests/bugs/distribute/bug-912564.t b/tests/bugs/distribute/bug-912564.t new file mode 100755 index 00000000000..d437728f83b --- /dev/null +++ b/tests/bugs/distribute/bug-912564.t @@ -0,0 +1,92 @@ +#!/bin/bash + +# Test that the rsync and "extra" regexes cause rename-in-place without +# creating linkfiles, when they're supposed to. Without the regex we'd have a +# 1/4 chance of each file being assigned to the right place, so with 16 files +# we have a 1/2^32 chance of getting the correct result by accident. + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +function count_linkfiles { + local i + local count=0 + for i in $(seq $2 $3); do + x=$(find $1$i -perm -1000 | wc -l) + # Divide by two because of the .glusterfs links. + count=$((count+x/2)) + done + echo $count +} + +# This function only exists to get around quoting difficulties in TEST. +function set_regex { + $CLI volume set $1 cluster.extra-hash-regex '^foo(.+)bar$' +} + +cleanup; + +TEST glusterd +TEST pidof glusterd + +mkdir -p $H0:$B0/${V0}0 +mkdir -p $H0:$B0/${V0}1 +mkdir -p $H0:$B0/${V0}2 +mkdir -p $H0:$B0/${V0}3 + +# Create and start a volume. +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \ + $H0:$B0/${V0}2 $H0:$B0/${V0}3 +TEST $CLI volume start $V0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; + +# Mount it. +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 + +# Make sure the rsync regex works, by verifying that no linkfiles are +# created. +rm -f $M0/file* +for i in $(seq 0 15); do + fn=$(printf file%x $i) + tmp_fn=$(printf .%s.%d $fn $RANDOM) + echo testing > $M0/$tmp_fn + mv $M0/$tmp_fn $M0/$fn +done +lf=$(count_linkfiles $B0/$V0 0 3) +TEST [ "$lf" -eq "0" ] + +# Make sure that linkfiles *are* created for normal files. +rm -f $M0/file* +for i in $(seq 0 15); do + fn=$(printf file%x $i) + tmp_fn=$(printf foo%sbar $fn) + echo testing > $M0/$tmp_fn + mv $M0/$tmp_fn $M0/$fn +done +lf=$(count_linkfiles $B0/$V0 0 3) +TEST [ "$lf" -ne "0" ] + +# Make sure that setting an extra regex suppresses the linkfiles. +TEST set_regex $V0 +rm -f $M0/file* +for i in $(seq 0 15); do + fn=$(printf file%x $i) + tmp_fn=$(printf foo%sbar $fn) + echo testing > $M0/$tmp_fn + mv $M0/$tmp_fn $M0/$fn +done +lf=$(count_linkfiles $B0/$V0 0 3) +TEST [ "$lf" -eq "0" ] + +# Re-test the rsync regex, to make sure the extra one didn't break it. 
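+# Background (assumption): the regexes work because DHT hashes only the part of
+# the temporary name captured by the pattern, so an rsync-style ".fileNN.12345"
+# (or a "foo...bar" name with the extra regex set above) hashes to the same
+# brick as the final name and the mv needs no linkfile.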
+rm -f $M0/file* +for i in $(seq 0 15); do + fn=$(printf file%x $i) + tmp_fn=$(printf .%s.%d $fn $RANDOM) + echo testing > $M0/$tmp_fn + mv $M0/$tmp_fn $M0/$fn +done +lf=$(count_linkfiles $B0/$V0 0 3) +TEST [ "$lf" -eq "0" ] + +cleanup diff --git a/tests/bugs/distribute/bug-915554.t b/tests/bugs/distribute/bug-915554.t new file mode 100755 index 00000000000..5caf4834b8c --- /dev/null +++ b/tests/bugs/distribute/bug-915554.t @@ -0,0 +1,76 @@ +#!/bin/bash +# +# Bug <915554> +# +# This test checks for a condition where a rebalance migrates a file and does +# not preserve the original file size. This can occur due to hole preservation +# logic in the file migration code. If a file size is aligned to a disk sector +# boundary (512b) and the tail portion of the file is zero-filled, the file +# may end up truncated to the end of the last data region in the file. +# +### + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../dht.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +BRICK_COUNT=3 +# create, start and mount a two brick DHT volume +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 +TEST $CLI volume start $V0 + +TEST glusterfs --attribute-timeout=0 --entry-timeout=0 --gid-timeout=-1 -s $H0 --volfile-id $V0 $M0; + +i=1 +# Write some data to a file and extend such that the file is sparse to a sector +# aligned boundary. +echo test > $M0/$i +TEST truncate -s 1M $M0/$i + +# cache the original size +SIZE1=`stat -c %s $M0/$i` + +# rename till file gets a linkfile + +while [ $i -ne 0 ] +do + test=`mv $M0/$i $M0/$(( $i+1 )) 2>/dev/null` + if [ $? -ne 0 ] + then + echo "rename failed" + break + fi + let i++ + file_has_linkfile $i + has_link=$? + if [ $has_link -eq 2 ] + then + break; + fi +done + +# start a rebalance (force option to overide checks) to trigger migration of +# file + +TEST $CLI volume rebalance $V0 start force + +# check if rebalance has completed for upto 15 secs + +EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed + +# validate the file size after the migration +SIZE2=`stat -c %s $M0/$i` + +TEST [ $SIZE1 -eq $SIZE2 ] + +TEST rm -f $M0/$i +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/distribute/bug-921408.t b/tests/bugs/distribute/bug-921408.t new file mode 100755 index 00000000000..b1887f8ae22 --- /dev/null +++ b/tests/bugs/distribute/bug-921408.t @@ -0,0 +1,90 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../fileio.rc +. $(dirname $0)/../../dht.rc +. $(dirname $0)/../../volume.rc + +cleanup; +wait_check_status () +{ + n=0 + while [ $n -lt $1 ] + do + ret=$(rebalance_completed) + if [ $ret == "0" ] + then + return 0; + else + sleep 1 + n=`expr $n + 1`; + fi + done + return 1; +} + +addbr_rebal_till_layout_change() +{ + val=1 + l=$1 + i=1 + while [ $i -lt 5 ] + do + $CLI volume add-brick $V0 $H0:$B0/${V0}$l &>/dev/null + $CLI volume rebalance $V0 fix-layout start &>/dev/null + wait_check_status $REBALANCE_TIMEOUT + if [ $? 
-eq 1 ] + then + break + fi + NEW_LAYOUT=`get_layout $B0/${V0}0` + if [ $OLD_LAYOUT == $NEW_LAYOUT ] + then + i=`expr $i + 1`; + l=`expr $l + 1`; + else + val=0 + break + fi + done + return $val +} +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 +TEST $CLI volume set $V0 subvols-per-directory 1 +TEST $CLI volume start $V0 + +TEST glusterfs -s $H0 --volfile-id $V0 $M0; + +TEST mkdir $M0/test +TEST touch $M0/test/test + +fd=`fd_available` +TEST fd_open $fd "rw" $M0/test/test + +OLD_LAYOUT=`get_layout $B0/${V0}0` + +addbr_rebal_till_layout_change 1 + +TEST [ $? -eq 0 ] + +for i in $(seq 1 1000) +do + ls -l $M0/ >/dev/null + ret=$? + if [ $ret != 0 ] + then + break + fi +done + +TEST [ $ret == 0 ]; +TEST fd_close $fd; + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup diff --git a/tests/bugs/distribute/bug-924265.t b/tests/bugs/distribute/bug-924265.t new file mode 100755 index 00000000000..67c21de97cb --- /dev/null +++ b/tests/bugs/distribute/bug-924265.t @@ -0,0 +1,35 @@ +#!/bin/bash + +# Test that setting cluster.dht-xattr-name works, and that DHT consistently +# uses the specified name instead of the default. + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +# We only care about the exit code, so keep it quiet. +function silent_getfattr { + getfattr $* &> /dev/null +} + +cleanup + +TEST glusterd +TEST pidof glusterd + +mkdir -p $H0:$B0/${V0}0 + +# Create a volume and set the option. +TEST $CLI volume create $V0 $H0:$B0/${V0}0 +TEST $CLI volume set $V0 cluster.dht-xattr-name trusted.foo.bar + +# Start and mount the volume. +TEST $CLI volume start $V0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 + +# Create a directory and make sure it has the right xattr. +mkdir $M0/test +TEST ! silent_getfattr -n trusted.glusterfs.dht $B0/${V0}0/test +TEST silent_getfattr -n trusted.foo.bar $B0/${V0}0/test + +cleanup diff --git a/tests/bugs/distribute/bug-961615.t b/tests/bugs/distribute/bug-961615.t new file mode 100644 index 00000000000..00938e8fa9b --- /dev/null +++ b/tests/bugs/distribute/bug-961615.t @@ -0,0 +1,34 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +#This test tests that an extra fd_unref does not happen in rebalance +#migration completion check code path in dht + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 +TEST $CLI volume set $V0 performance.quick-read off +TEST $CLI volume set $V0 performance.io-cache off +TEST $CLI volume set $V0 performance.write-behind off +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume set $V0 performance.read-ahead off +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +TEST touch $M0/1 +#This rename creates a link file for 10 in the other volume. +TEST mv $M0/1 $M0/10 +#Lets keep writing to the file which will trigger rebalance completion check +dd if=/dev/zero of=$M0/10 bs=1k & +bg_pid=$! 
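+# Background (assumption): each write on the still-open fd goes through DHT's
+# migration-completion check; once rebalance moves file '10', that path re-opens
+# the fd on the destination subvolume, and the original bug was an unbalanced
+# fd_unref there that could crash the mount.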
+#Now rebalance force will migrate file '10' +TEST $CLI volume rebalance $V0 start force +EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 +#If the bug exists mount would have crashed by now +TEST ls $M0 +kill -9 $bg_pid > /dev/null 2>&1 +wait > /dev/null 2>&1 +cleanup diff --git a/tests/bugs/distribute/bug-973073.t b/tests/bugs/distribute/bug-973073.t new file mode 100755 index 00000000000..aac4afdc226 --- /dev/null +++ b/tests/bugs/distribute/bug-973073.t @@ -0,0 +1,48 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../dht.rc + +## Steps followed are one descibed in bugzilla + +cleanup; + +function get_layout() +{ + layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1` + + if [ $? -ne 0 ] + then + echo 1 + else + echo 0 + fi + +} + +BRICK_COUNT=3 + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 +TEST $CLI volume start $V0 + +## Mount FUSE +TEST glusterfs -s $H0 --volfile-id $V0 $M0; + +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start + +## remove-brick status == rebalance_status +EXPECT_WITHIN $REBALANCE_TIMEOUT "0" remove_brick_completed + +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop + +TEST $CLI volume rebalance $V0 fix-layout start + +EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed + +TEST mkdir $M0/dir 2>/dev/null; + +EXPECT "0" get_layout $B0/${V0}2/dir +cleanup; diff --git a/tests/bugs/distribute/overlap.py b/tests/bugs/distribute/overlap.py new file mode 100755 index 00000000000..15f2da473f1 --- /dev/null +++ b/tests/bugs/distribute/overlap.py @@ -0,0 +1,59 @@ +#!/usr/bin/python + +import sys + +def calculate_one (ov, nv): + old_start = int(ov[18:26],16) + old_end = int(ov[26:34],16) + new_start = int(nv[18:26],16) + new_end = int(nv[26:34],16) + if (new_end < old_start) or (new_start > old_end): + #print '%s, %s -> ZERO' % (ov, nv) + return 0 + all_start = max(old_start,new_start) + all_end = min(old_end,new_end) + #print '%s, %s -> %08x' % (ov, nv, all_end - all_start + 1) + return all_end - all_start + 1 + +def calculate_all (values): + total = 0 + nv_index = len(values) / 2 + for old_val in values[:nv_index]: + new_val = values[nv_index] + nv_index += 1 + total += calculate_one(old_val,new_val) + return total + +""" +test1_vals = [ + '0x0000000000000000000000003fffffff', # first quarter + '0x0000000000000000400000007fffffff', # second quarter + '0x000000000000000080000000ffffffff', # second half + '0x00000000000000000000000055555554', # first third + '0x000000000000000055555555aaaaaaa9', # second third + '0x0000000000000000aaaaaaaaffffffff', # last third +] + +test2_vals = [ + '0x0000000000000000000000003fffffff', # first quarter + '0x0000000000000000400000007fffffff', # second quarter + '0x000000000000000080000000ffffffff', # second half + '0x00000000000000000000000055555554', # first third + # Next two are (incorrectly) swapped. 
+ '0x0000000000000000aaaaaaaaffffffff', # last third + '0x000000000000000055555555aaaaaaa9', # second third +] + +print '%08x' % calculate_one(test1_vals[0],test1_vals[3]) +print '%08x' % calculate_one(test1_vals[1],test1_vals[4]) +print '%08x' % calculate_one(test1_vals[2],test1_vals[5]) +print '= %08x' % calculate_all(test1_vals) +print '%08x' % calculate_one(test2_vals[0],test2_vals[3]) +print '%08x' % calculate_one(test2_vals[1],test2_vals[4]) +print '%08x' % calculate_one(test2_vals[2],test2_vals[5]) +print '= %08x' % calculate_all(test2_vals) +""" + +if __name__ == '__main__': + # Return decimal so bash can reason about it. + print '%d' % calculate_all(sys.argv[1:]) diff --git a/tests/bugs/error-gen/bug-767095.t b/tests/bugs/error-gen/bug-767095.t new file mode 100755 index 00000000000..34c3b182169 --- /dev/null +++ b/tests/bugs/error-gen/bug-767095.t @@ -0,0 +1,51 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + +dump_dir='/tmp/gerrit_glusterfs' +TEST mkdir -p $dump_dir; +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +TEST $CLI volume set $V0 error-gen posix; +TEST $CLI volume set $V0 server.statedump-path $dump_dir; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST PID=`gluster --xml volume status patchy | grep -A 5 patchy1 | grep '' | cut -d '>' -f 2 | cut -d '<' -f 1` +TEST kill -USR1 $PID; +sleep 2; +for file_name in $(ls $dump_dir) +do + TEST grep "error-gen.priv" $dump_dir/$file_name; +done + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +TEST rm -rf $dump_dir; + +cleanup; diff --git a/tests/bugs/fuse/bug-1030208.t b/tests/bugs/fuse/bug-1030208.t new file mode 100644 index 00000000000..526283cf101 --- /dev/null +++ b/tests/bugs/fuse/bug-1030208.t @@ -0,0 +1,35 @@ +#!/bin/bash + +#Test case: Hardlink test + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +#Basic checks +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info + +#Create a distributed volume +TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}; +TEST $CLI volume start $V0 + +# Mount FUSE +TEST glusterfs -s $H0 --volfile-id $V0 $M0 + +#Create a file and perform fop on a DIR +TEST touch $M0/foo +TEST ls $M0/ + +#Create hardlink +TEST ln $M0/foo $M0/bar + + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/fuse/bug-1126048.c b/tests/bugs/fuse/bug-1126048.c new file mode 100644 index 00000000000..5b9afafef0f --- /dev/null +++ b/tests/bugs/fuse/bug-1126048.c @@ -0,0 +1,37 @@ +#include +#include +#include +#include +#include +#include +#include + +/* + * This function opens a file and to trigger migration failure, unlinks the + * file and performs graph switch (cmd passed in argv). If everything goes fine, + * fsync should fail without crashing the mount process. 
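+ * Background (assumption): the "Bad file descriptor" check in bug-1126048.t
+ * relies on this sequence; after the graph switch the client is expected to try
+ * migrating the open fd to the new graph, which cannot succeed for an unlinked
+ * file, so fsync reports an error instead of the mount crashing.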
+ */ +int +main (int argc, char **argv) +{ + int ret = 0; + int fd = 0; + char *cmd = argv[1]; + + printf ("cmd is: %s\n", cmd); + fd = open("a.txt", O_CREAT|O_RDWR); + if (fd < 0) + printf ("open failed: %s\n", strerror(errno)); + + ret = unlink("a.txt"); + if (ret < 0) + printf ("unlink failed: %s\n", strerror(errno)); + if (write (fd, "abc", 3) < 0) + printf ("Not able to print %s\n", strerror (errno)); + system(cmd); + sleep(1); /* No way to confirm graph switch so sleep 1 */ + ret = fsync(fd); + if (ret < 0) + printf ("Not able to fsync %s\n", strerror (errno)); + return 0; +} diff --git a/tests/bugs/fuse/bug-1126048.t b/tests/bugs/fuse/bug-1126048.t new file mode 100755 index 00000000000..5e2ed293cd3 --- /dev/null +++ b/tests/bugs/fuse/bug-1126048.t @@ -0,0 +1,30 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +function grep_for_ebadf { + $M0/bug-1126048 "gluster --mode=script --wignore volume add-brick $V0 $H0:$B0/brick2" | grep -i "Bad file descriptor" +} +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/brick1; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST glusterfs -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=yes + +build_tester $(dirname $0)/bug-1126048.c + +TEST cp $(dirname $0)/bug-1126048 $M0 +cd $M0 +TEST grep_for_ebadf +TEST ls -l $M0 +cd - +TEST rm -f $(dirname $0)/bug-1126048 +cleanup; diff --git a/tests/bugs/fuse/bug-858215.t b/tests/bugs/fuse/bug-858215.t new file mode 100755 index 00000000000..b33b8d4971b --- /dev/null +++ b/tests/bugs/fuse/bug-858215.t @@ -0,0 +1,78 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Mount FUSE with caching disabled +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +## Test for checking whether the fops have been saved in the event-history +TEST ! stat $M0/newfile; +TEST touch $M0/newfile; +TEST stat $M0/newfile; +TEST rm $M0/newfile; + +nfs_pid=$(cat $GLUSTERD_WORKDIR/nfs/run/nfs.pid); +glustershd_pid=$(cat $GLUSTERD_WORKDIR/glustershd/run/glustershd.pid); + +pids=$(pidof glusterfs); +for i in $pids +do + if [ $i -ne $nfs_pid ] && [ $i -ne $glustershd_pid ]; then + mount_pid=$i; + break; + fi +done + +dump_dir='/tmp/gerrit_glusterfs' +cat >$statedumpdir/glusterdump.options </dev/null 2>&1 + + if [[ -e $B0/${V0}1/file$i.data && $BRICK1FILE = "0" ]] + then + BRICK1FILE=file$i.data + CONTINUE=$(( $CONTINUE - 1 )) + fi + + if [[ -e $B0/${V0}2/file$i.data && $BRICK2FILE = "0" ]] + then + BRICK2FILE=file$i.data + CONTINUE=$(( $CONTINUE - 1 )) + fi + + rm $M0/file$i.data + let i++ +done + + +## Bring free space on one of the bricks to less than minfree value by +## creating one big file. 
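+## Background (assumption): this exercises DHT's min-free-disk handling. Once the
+## hashed brick drops below the configured threshold, new data files are expected
+## to be created on a subvolume that still has space (the hashed brick typically
+## keeps only a pointer entry), which is what TEST CASE 0 below verifies.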
+dd if=/dev/zero of=$M0/fillonebrick.data bs=1024 count=25600 1>/dev/null 2>&1 + +#Lets find out where it was created +if [ -f $B0/${V0}1/fillonebrick.data ] +then + FILETOCREATE=$BRICK1FILE + OTHERBRICK=$B0/${V0}2 +else + FILETOCREATE=$BRICK2FILE + OTHERBRICK=$B0/${V0}1 +fi + +##--------------------------------TEST CASE 0----------------------------------- +## If we try to create a file which should go into full brick as per hash, it +## should go into the other brick instead. + +## Before that let us create files just to make gluster refresh the stat +## Using touch so it should not change the disk usage stats +for k in {1..20}; +do + touch $M0/dummyfile$k +done + +dd if=/dev/zero of=$M0/$FILETOCREATE bs=1024 count=2048 1>/dev/null 2>&1 +TEST [ -e $OTHERBRICK/$FILETOCREATE ] +## Done testing, lets clean up +TEST rm -rf $M0/* + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; +$CLI volume delete $V0; + +UMOUNT_LOOP ${B0}/${V0}{1,2} +rm -f ${B0}/brick{1,2} + +cleanup; diff --git a/tests/bugs/fuse/bug-924726.t b/tests/bugs/fuse/bug-924726.t new file mode 100755 index 00000000000..58bf2c8a083 --- /dev/null +++ b/tests/bugs/fuse/bug-924726.t @@ -0,0 +1,45 @@ +#!/bin/bash + +TESTS_EXPECTED_IN_LOOP=10 + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +function get_socket_count() { + netstat -nap | grep $1 | wc -l +} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/$V0 +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST glusterfs -s $H0 --volfile-id $V0 $M0; + +TEST ls $M0 + +GLFS_MNT_PID=`ps ax | grep -i $M0 | grep glusterfs | grep -v grep | sed -e "s/^ *\([0-9]*\).*/\1/g"` + +SOCKETS_BEFORE_SWITCH=`netstat -nap | grep $GLFS_MNT_PID | grep ESTABLISHED | wc -l` + +for i in $(seq 1 5); do + TEST_IN_LOOP $CLI volume set $V0 performance.write-behind off; + sleep 1; + TEST_IN_LOOP $CLI volume set $V0 performance.write-behind on; + sleep 1; +done + +SOCKETS_AFTER_SWITCH=`netstat -nap | grep $GLFS_MNT_PID | grep ESTABLISHED | wc -l` + +# currently active graph is not cleaned up till some operation on +# mount-point. Hence there is one extra graph. +TEST [ $SOCKETS_AFTER_SWITCH = `expr $SOCKETS_BEFORE_SWITCH + 1` ] + +cleanup; diff --git a/tests/bugs/fuse/bug-963678.t b/tests/bugs/fuse/bug-963678.t new file mode 100644 index 00000000000..006181f26e1 --- /dev/null +++ b/tests/bugs/fuse/bug-963678.t @@ -0,0 +1,57 @@ +#!/bin/bash +# +# Bug 963678 - Test discard functionality +# +# Test that basic discard (hole punch) functionality works via the fallocate +# command line tool. Hole punch deallocates a region of a file, creating a hole +# and a zero-filled data region. We verify that hole punch works, frees blocks +# and that subsequent reads do not read stale data (caches are invalidated). +# +# NOTE: fuse fallocate is known to be broken with regard to cache invalidation +# up to 3.9.0 kernels. Therefore, FOPEN_KEEP_CACHE is not used in this +# test (opens will invalidate the fuse cache). +### + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../fallocate.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2} +TEST $CLI volume start $V0 + +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + +# check for fallocate and hole punch support +require_fallocate -l 1m $M0/file +require_fallocate -p -l 512k $M0/file && rm -f $M0/file + +# allocate some blocks, punch a hole and verify block allocation +TEST fallocate -l 1m $M0/file +blksz=`stat -c %B $M0/file` +nblks=`stat -c %b $M0/file` +TEST [ $(($blksz * $nblks)) -ge 1048576 ] +TEST fallocate -p -o 512k -l 128k $M0/file + +nblks=`stat -c %b $M0/file` +# allow some room for xattr blocks +TEST [ $(($blksz * $nblks)) -lt $((917504 + 16384)) ] +TEST unlink $M0/file + +# write some data, punch a hole and verify the file content changes +TEST dd if=/dev/urandom of=$M0/file bs=1024k count=1 +TEST cp $M0/file $M0/file.copy.pre +TEST fallocate -p -o 512k -l 128k $M0/file +TEST cp $M0/file $M0/file.copy.post +TEST ! cmp $M0/file.copy.pre $M0/file.copy.post +TEST unlink $M0/file + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/fuse/bug-983477.t b/tests/bugs/fuse/bug-983477.t new file mode 100755 index 00000000000..41ddd9e55a9 --- /dev/null +++ b/tests/bugs/fuse/bug-983477.t @@ -0,0 +1,53 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +#This script checks if use-readdirp option works as accepted in mount options + +function get_use_readdirp_value { + local vol=$1 + local statedump=$(generate_mount_statedump $vol) + sleep 1 + local val=$(grep "use_readdirp=" $statedump | cut -f2 -d'=' | tail -1) + rm -f $statedump + echo $val +} +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/${V0} +TEST $CLI volume start $V0 +#If readdirp is enabled statedump should reflect it +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=yes +TEST cd $M0 +EXPECT "1" get_use_readdirp_value $V0 +TEST cd - +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +#If readdirp is enabled statedump should reflect it +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=no +TEST cd $M0 +EXPECT "0" get_use_readdirp_value $V0 +TEST cd - +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +#Since args are optional on this argument just specifying "--use-readdirp" should also turn it `on` not `off` +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp +TEST cd $M0 +EXPECT "1" get_use_readdirp_value $V0 +TEST cd - +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +#By default it is enabled. +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +TEST cd $M0 +EXPECT "1" get_use_readdirp_value $V0 +TEST cd - +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +#Invalid values for use-readdirp should not be accepted +TEST ! glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=please-fail + +cleanup diff --git a/tests/bugs/fuse/bug-985074.t b/tests/bugs/fuse/bug-985074.t new file mode 100644 index 00000000000..d10fd9f8b41 --- /dev/null +++ b/tests/bugs/fuse/bug-985074.t @@ -0,0 +1,55 @@ +#!/bin/bash +# +# Bug 985074 - Verify stale inode/dentry mappings are cleaned out. 
+# +# This test verifies that an inode/dentry mapping for a file removed via a +# separate mount point is cleaned up appropriately. We create a file and hard +# link from client 1. Next we remove the link via client 2. Finally, from client +# 1 we attempt to rename the original filename to the name of the just removed +# hard link. +# +# If the inode is not unlinked properly, the removed directory entry can resolve +# to an inode (on the client that never saw the rm) that ends up passed down +# through the lookup call. If md-cache holds valid metadata on the inode (due to +# a large timeout value or recent lookup on the valid name), it is tricked into +# returning a successful lookup that should have returned ENOENT. This manifests +# as an error from the mv command in the following test sequence because file +# and file.link resolve to the same file: +# +# # mv /mnt/glusterfs/0/file /mnt/glusterfs/0/file.link +# mv: `/mnt/glusterfs/0/file' and `/mnt/glusterfs/0/file.link' are the same file +# +### + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd + +TEST $CLI volume create $V0 $H0:$B0/$V0 +TEST $CLI volume start $V0 +TEST $CLI volume set $V0 md-cache-timeout 3 + +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0 +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1 --entry-timeout=0 --attribute-timeout=0 + +TEST touch $M0/file +TEST ln $M0/file $M0/file.link +TEST ls -ali $M0 $M1 +TEST rm -f $M1/file.link +TEST ls -ali $M0 $M1 +# expire the md-cache timeout +sleep 3 +TEST mv $M0/file $M0/file.link +TEST stat $M0/file.link +TEST ! stat $M0/file + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/geo-replication/bug-1111490.t b/tests/bugs/geo-replication/bug-1111490.t new file mode 100644 index 00000000000..e598eccc1a5 --- /dev/null +++ b/tests/bugs/geo-replication/bug-1111490.t @@ -0,0 +1,34 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 +TEST $CLI volume start $V0 + +# mount with auxillary gfid mount +TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 --aux-gfid-mount + +# create file with specific gfid +uuid=`uuidgen` +EXPECT "File creation OK" $PYTHON $(dirname $0)/../../utils/gfid-access.py \ + $M0 ROOT file0 $uuid file 10 10 0644 + +# check gfid +EXPECT "$uuid" getfattr --only-values -n glusterfs.gfid.string $M0/file0 + +# unmount and mount again so as to start with a fresh inode table +# or use another mount... +TEST umount $M0 +TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 --aux-gfid-mount + +# touch the file again (gfid-access.py handles errno) +EXPECT "File creation OK" $PYTHON $(dirname $0)/../../utils/gfid-access.py \ + $M0 ROOT file0 $uuid file 10 10 0644 + +cleanup; diff --git a/tests/bugs/geo-replication/bug-877293.t b/tests/bugs/geo-replication/bug-877293.t new file mode 100755 index 00000000000..542774ab900 --- /dev/null +++ b/tests/bugs/geo-replication/bug-877293.t @@ -0,0 +1,41 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +TEST glusterd +TEST pidof glusterd + +## Start and create a replicated volume +mkdir -p ${B0}/${V0}-0 +mkdir -p ${B0}/${V0}-1 +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} + +TEST $CLI volume set $V0 indexing on + +TEST $CLI volume start $V0; + +## Mount native +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 + +## Mount client-pid=-1 +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 --client-pid=-1 $M1 + +TEST touch $M0 + +vol_uuid=`getfattr -n trusted.glusterfs.volume-mark -ehex $M1 | sed -n 's/^trusted.glusterfs.volume-mark=0x//p' | cut -b5-36 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'` +xtime=trusted.glusterfs.$vol_uuid.xtime + +TEST "getfattr -n $xtime $M1 | grep -q ${xtime}=" + +TEST kill_brick $V0 $H0 $B0/${V0}-0 + +TEST "getfattr -n $xtime $M1 | grep -q ${xtime}=" + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1 + +TEST $CLI volume stop $V0; +TEST $CLI volume delete $V0; + +cleanup diff --git a/tests/bugs/getlk_owner.c b/tests/bugs/getlk_owner.c deleted file mode 100644 index 619c2e32d98..00000000000 --- a/tests/bugs/getlk_owner.c +++ /dev/null @@ -1,120 +0,0 @@ -#include -#include -#include -#include - -#define GETLK_OWNER_CHECK(f, cp, label) \ - do { \ - switch (f.l_type) { \ - case F_RDLCK: \ - case F_WRLCK: \ - ret = 1; \ - goto label; \ - case F_UNLCK: \ - if (!are_flocks_sane (&f, &cp)) { \ - ret = 1; \ - goto label; \ - } \ - break; \ - } \ - } while (0) - -void -flock_init (struct flock *f, short int type, off_t start, off_t len) -{ - f->l_type = type; - f->l_start = start; - f->l_len = len; -} - -int -flock_cp (struct flock *dst, struct flock *src) -{ - memcpy ((void *) dst, (void *) src, sizeof (struct flock)); -} - -int -are_flocks_sane (struct flock *src, struct flock *cpy) -{ - return ((src->l_whence == cpy->l_whence) && - (src->l_start == cpy->l_start) && - (src->l_len == cpy->l_len)); -} - -/* - * Test description: - * SETLK (0,3), F_WRLCK - * SETLK (3,3), F_WRLCK - * - * the following GETLK requests must return flock struct unmodified - * except for l_type to F_UNLCK - * GETLK (3,3), F_WRLCK - * GETLK (3,3), F_RDLCK - * - * */ - -int main (int argc, char **argv) -{ - int fd = -1; - int ret = 1; - char *fname = NULL; - struct flock f = {0,}; - struct flock cp = {0,}; - - if (argc < 2) - goto out; - - fname = argv[1]; - fd = open (fname, O_RDWR); - if (fd == -1) { - perror ("open"); - goto out; - } - - flock_init (&f, F_WRLCK, 0, 3); - flock_cp (&cp, &f); - ret = fcntl (fd, F_SETLK, &f); - if (ret) { - perror ("fcntl"); - goto out; - } - if (!are_flocks_sane (&f, &cp)) { - ret = 1; - goto out; - } - - flock_init (&f, F_WRLCK, 3, 3); - flock_cp (&cp, &f); - ret = fcntl (fd, F_SETLK, &f); - if (ret) { - perror ("fcntl"); - goto out; - } - if (!are_flocks_sane (&f, &cp)) { - ret = 1; - goto out; - } - - flock_init (&f, F_WRLCK, 3, 3); - flock_cp (&cp, &f); - ret = fcntl (fd, F_GETLK, &f); - if (ret) { - perror ("fcntl"); - return 1; - } - GETLK_OWNER_CHECK (f, cp, out); - - flock_init (&f, F_RDLCK, 3, 3); - flock_cp (&cp, &f); - ret = fcntl (fd, F_GETLK, &f); - if (ret) { - perror ("fcntl"); - return 1; - } - GETLK_OWNER_CHECK (f, cp, out); - -out: - if (fd != -1) - close (fd); - return ret; -} diff --git a/tests/bugs/glusterd/859927/repl.t b/tests/bugs/glusterd/859927/repl.t new file mode 100755 index 00000000000..db96d70bfa6 --- /dev/null +++ b/tests/bugs/glusterd/859927/repl.t @@ -0,0 +1,69 @@ 
+#!/bin/bash + +. $(dirname $0)/../../../include.rc +. $(dirname $0)/../../../volume.rc +cleanup; + +TEST glusterd; +TEST pidof glusterd + +#Tests for data-self-heal-algorithm option +function create_setup_for_self_heal { + file=$1 + kill_brick $V0 $H0 $B0/${V0}1 + dd of=$file if=/dev/urandom bs=1024k count=1 2>&1 > /dev/null + $CLI volume start $V0 force +} + +function test_write { + dd of=$M0/a if=/dev/urandom bs=1k count=1 2>&1 > /dev/null +} + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume set $V0 client-log-level DEBUG +TEST $CLI volume set $V0 cluster.background-self-heal-count 0 +TEST $CLI volume start $V0 +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0; + +touch $M0/a + +TEST $CLI volume set $V0 cluster.data-self-heal-algorithm full +EXPECT full volume_option $V0 cluster.data-self-heal-algorithm +create_setup_for_self_heal $M0/a +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +cat $file 2>&1 > /dev/null +TEST cmp $B0/${V0}1/a $B0/${V0}2/a + +TEST $CLI volume set $V0 cluster.data-self-heal-algorithm diff +EXPECT diff volume_option $V0 cluster.data-self-heal-algorithm +create_setup_for_self_heal $M0/a +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +cat $file 2>&1 > /dev/null +TEST cmp $B0/${V0}1/a $B0/${V0}2/a + +TEST $CLI volume reset $V0 cluster.data-self-heal-algorithm +create_setup_for_self_heal $M0/a +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +cat $file 2>&1 > /dev/null +TEST cmp $B0/${V0}1/a $B0/${V0}2/a + +TEST ! $CLI volume set $V0 cluster.data-self-heal-algorithm "" + +#Tests for quorum-type option +TEST ! $CLI volume set $V0 cluster.quorum-type "" +TEST $CLI volume set $V0 cluster.quorum-type fixed +EXPECT fixed volume_option $V0 cluster.quorum-type +TEST $CLI volume set $V0 cluster.quorum-count 2 +kill_brick $V0 $H0 $B0/${V0}1 +TEST ! test_write +TEST $CLI volume set $V0 cluster.quorum-type auto +EXPECT auto volume_option $V0 cluster.quorum-type +TEST ! test_write +TEST $CLI volume set $V0 cluster.quorum-type none +EXPECT none volume_option $V0 cluster.quorum-type +TEST test_write +TEST $CLI volume reset $V0 cluster.quorum-type +TEST test_write +cleanup; diff --git a/tests/bugs/glusterd/bug-000000.t b/tests/bugs/glusterd/bug-000000.t new file mode 100755 index 00000000000..55f7b11f598 --- /dev/null +++ b/tests/bugs/glusterd/bug-000000.t @@ -0,0 +1,9 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd + +cleanup; diff --git a/tests/bugs/glusterd/bug-1002556.t b/tests/bugs/glusterd/bug-1002556.t new file mode 100755 index 00000000000..ac71d06d533 --- /dev/null +++ b/tests/bugs/glusterd/bug-1002556.t @@ -0,0 +1,25 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume start $V0 +EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'; + +TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2 +EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'; + +TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}1 force +EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'; + +TEST killall glusterd +TEST glusterd + +EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'; +cleanup diff --git a/tests/bugs/glusterd/bug-1004744.t b/tests/bugs/glusterd/bug-1004744.t new file mode 100644 index 00000000000..b48ed97fb52 --- /dev/null +++ b/tests/bugs/glusterd/bug-1004744.t @@ -0,0 +1,46 @@ +#!/bin/bash + +#Test case: After a rebalance fix-layout, check if the rebalance status command +#displays the appropriate message at the CLI. + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +#Basic checks +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info + +#Create a 2x1 distributed volume +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; +TEST $CLI volume start $V0 + +# Mount FUSE and create file/directory +TEST glusterfs -s $H0 --volfile-id $V0 $M0 +for i in `seq 1 10`; +do + mkdir $M0/dir_$i + echo file>$M0/dir_$i/file_$i + for j in `seq 1 100`; + do + mkdir $M0/dir_$i/dir_$j + echo file>$M0/dir_$i/dir_$j/file_$j + done +done + +#add 2 bricks +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{3,4}; + +#perform rebalance fix-layout +TEST $CLI volume rebalance $V0 fix-layout start + +EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0; + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterd/bug-1022055.t b/tests/bugs/glusterd/bug-1022055.t new file mode 100755 index 00000000000..9f39c80b6b6 --- /dev/null +++ b/tests/bugs/glusterd/bug-1022055.t @@ -0,0 +1,26 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../cluster.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +cleanup; + +TEST launch_cluster 2; + +TEST $CLI_1 peer probe $H2; + +EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; + +TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0; + +TEST $CLI_1 volume start $V0; + +TEST $CLI_1 volume log rotate $V0; + +TEST $CLI_1 volume status; + +cleanup; diff --git a/tests/bugs/glusterd/bug-1027171.t b/tests/bugs/glusterd/bug-1027171.t new file mode 100644 index 00000000000..1b457d8f660 --- /dev/null +++ b/tests/bugs/glusterd/bug-1027171.t @@ -0,0 +1,53 @@ +#!/bin/bash + +#Test case: Do not allow commit if the bricks are not decommissioned + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +#Basic checks +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info + +#Create a Distributed volume +TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}; +TEST $CLI volume start $V0 + +#Remove bricks and commit without starting +function remove_brick_commit_status { + $CLI volume remove-brick $V0 \ + $H0:$B0/${V0}2 commit 2>&1 |grep -oE "success|decommissioned" +} +EXPECT "decommissioned" remove_brick_commit_status; + +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 +TEST ! 
$CLI volume info $V0 + +#Create a Distributed-Replicate volume +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..4}; +TEST $CLI volume start $V0 + +#Try to reduce replica count with start option +function remove_brick_start_status { + $CLI volume remove-brick $V0 replica 1 \ + $H0:$B0/${V0}1 $H0:$B0/${V0}3 start 2>&1 |grep -oE "success|failed" +} +EXPECT "failed" remove_brick_start_status; + +#Remove bricks with commit option +function remove_brick_commit_status2 { + $CLI volume remove-brick $V0 replica 1 \ + $H0:$B0/${V0}1 $H0:$B0/${V0}3 commit 2>&1 | + grep -oE "success|decommissioned" +} +EXPECT "decommissioned" remove_brick_commit_status2; + +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 +TEST ! $CLI volume info $V0 + +cleanup; diff --git a/tests/bugs/glusterd/bug-1040408.t b/tests/bugs/glusterd/bug-1040408.t new file mode 100644 index 00000000000..c378000630b --- /dev/null +++ b/tests/bugs/glusterd/bug-1040408.t @@ -0,0 +1,31 @@ +#!/bin/bash + +#Test case: Create a distributed replicate volume, and reduce +#replica count + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +#Basic checks +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info + +#Create a 2X3 distributed-replicate volume +TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..6}; +TEST $CLI volume start $V0 + +# Reduce to 2x2 volume by specifying bricks in reverse order +function remove_brick_status { + $CLI volume remove-brick $V0 replica 2 \ + $H0:$B0/${V0}6 $H0:$B0/${V0}3 force 2>&1 |grep -oE "success|failed" +} +EXPECT "success" remove_brick_status; + +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterd/bug-1046308.t b/tests/bugs/glusterd/bug-1046308.t new file mode 100644 index 00000000000..9c827c4a492 --- /dev/null +++ b/tests/bugs/glusterd/bug-1046308.t @@ -0,0 +1,19 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +volname="StartMigrationDuringRebalanceTest" +TEST glusterd +TEST pidof glusterd; + +TEST $CLI volume info; +TEST $CLI volume create $volname $H0:$B0/${volname}{1,2}; +TEST $CLI volume start $volname; +TEST $CLI volume rebalance $volname start; + +cleanup; + + + diff --git a/tests/bugs/glusterd/bug-1047955.t b/tests/bugs/glusterd/bug-1047955.t new file mode 100644 index 00000000000..a409d9f7195 --- /dev/null +++ b/tests/bugs/glusterd/bug-1047955.t @@ -0,0 +1,23 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../cluster.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +cleanup; + +# Create a 2x2 dist-rep volume; peer probe a new node. +# Performing remove-brick from this new node must succeed +# without crashing it's glusterd + +TEST launch_cluster 2; +TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4} +TEST $CLI_1 volume start $V0; +TEST $CLI_1 peer probe $H2; +EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; +TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start; +TEST $CLI_2 volume info +cleanup; diff --git a/tests/bugs/glusterd/bug-1070734.t b/tests/bugs/glusterd/bug-1070734.t new file mode 100755 index 00000000000..b5a53c24cab --- /dev/null +++ b/tests/bugs/glusterd/bug-1070734.t @@ -0,0 +1,74 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. 
$(dirname $0)/../../nfs.rc + +cleanup; + +## Start glusterd +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +## Lets create volume +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0; + +############################################################################ +#TEST-PLAN: +#Create a directory DIR and a file inside DIR +#check the hash brick of the file +#delete the directory for recreating later after remove-brick +#remove the brick where the files hashed to +#After remove-brick status says complete go on creating the same directory \ +#DIR and file +#Check if the file now falls into the other brick +#Check if the other brick gets the full layout and the remove brick gets \ +#the zeroed layout +############################################################################ + +TEST mkdir $N0/DIR; + +TEST touch $N0/DIR/file; + +if [ -f $B0/${V0}1/DIR/file ] +then + HASHED=$B0/${V0}1; + OTHERBRICK=$B0/${V0}2; +else + HASHED=$B0/${V0}2; + OTHERBRICK=$B0/${V0}1; +fi + +TEST rm -f $N0/DIR/file; +TEST rmdir $N0/DIR; +TEST $CLI volume remove-brick $V0 $H0:${HASHED} start; +EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" \ +"$H0:${HASHED}"; + +TEST mkdir $N0/DIR; +TEST touch $N0/DIR/file; + +#Check now the file should fall in to OTHERBRICK +TEST [ -f ${OTHERBRICK}/DIR/file ] + +#Check the DIR on HASHED should have got zeroed layout and the \ +#OTHERBRICK should have got full layout +EXPECT "0x00000001000000000000000000000000" dht_get_layout $HASHED/DIR ; +EXPECT "0x000000010000000000000000ffffffff" dht_get_layout $OTHERBRICK/DIR; + +## Before killing daemon to avoid deadlocks +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +cleanup diff --git a/tests/bugs/glusterd/bug-1075087.t b/tests/bugs/glusterd/bug-1075087.t new file mode 100644 index 00000000000..35155a0b8c9 --- /dev/null +++ b/tests/bugs/glusterd/bug-1075087.t @@ -0,0 +1,33 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \ + $H0:$B0/${V0}2 $H0:$B0/${V0}3 +TEST $CLI volume start $V0 + +## Mount FUSE +TEST glusterfs -s $H0 --volfile-id=$V0 $M0; + +TEST mkdir $M0/dir{1..10}; +TEST touch $M0/dir{1..10}/files{1..10}; + +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:/$B0/${V0}5 + +TEST $CLI volume rebalance $V0 start force +EXPECT_WITHIN 60 "completed" rebalance_status_field $V0 + +TEST pkill gluster +TEST glusterd +TEST pidof glusterd + +# status should be "completed" immediate after glusterd has respawned. +EXPECT_WITHIN 5 "completed" rebalance_status_field $V0 + +cleanup; diff --git a/tests/bugs/glusterd/bug-1085330.t b/tests/bugs/glusterd/bug-1085330.t new file mode 100755 index 00000000000..ffcfe9274eb --- /dev/null +++ b/tests/bugs/glusterd/bug-1085330.t @@ -0,0 +1,80 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc
+
+STR="1234567890"
+volname="Vol"
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+
+# Construct volname string such that it's more than 256 characters
+for i in {1..30}
+do
+        volname+=$STR
+done
+# Now $volname is more than 256 chars
+
+TEST ! $CLI volume create $volname $H0:$B0/${volname}{1,2};
+
+TEST $CLI volume info;
+
+# Construct brick string such that it's more than 256 characters
+volname="Vol1234"
+brick="brick"
+for i in {1..30}
+do
+        brick+=$STR
+done
+# Now $brick is more than 256 chars
+
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# Now try to create a volume with a couple of bricks (strlen(volname) = 128 &
+# strlen(brick) = 128).
+# Command should still fail as strlen(volfpath) > 256
+
+volname="Volume-0"
+brick="brick-00"
+STR="12345678"
+
+for i in {1..15}
+do
+        volname+=$STR
+        brick+=$STR
+done
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# test case with brick path as 255 and a trailing "/"
+brick=""
+STR1="12345678"
+volname="vol"
+
+for i in {1..31}
+do
+        brick+=$STR1
+done
+brick+="123456/"
+
+echo $brick | wc -c
+# Now $brick is exactly 255 chars, ending with a trailing "/"
+# This will still fail as volfpath exceeds _POSIX_PATH_MAX chars
+
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# Positive test case
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+TEST $CLI volume info;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1087203.t b/tests/bugs/glusterd/bug-1087203.t
new file mode 100644
index 00000000000..035be098576
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1087203.t
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../cluster.rc
+
+function get_volume_info ()
+{
+        local var=$1
+        $CLI_1 volume info $V0 | grep "^$var" | sed 's/.*: //'
+}
+
+cleanup;
+
+TEST verify_lvm_version
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
+EXPECT "$V0" get_volume_info 'Volume Name';
+EXPECT 'Created' get_volume_info 'Status';
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' get_volume_info 'Status';
+
+
+# Setting system limit
+TEST $CLI_1 snapshot config snap-max-hard-limit 100
+
+# Volume limit cannot exceed system limit, as limit is set to 100,
+# this should fail.
+TEST ! $CLI_1 snapshot config $V0 snap-max-hard-limit 101
+
+# Following are the invalid cases
+TEST ! $CLI_1 snapshot config $V0 snap-max-hard-limit a10
+TEST ! $CLI_1 snapshot config snap-max-hard-limit 10a
+TEST ! $CLI_1 snapshot config snap-max-hard-limit 10%
+TEST ! $CLI_1 snapshot config snap-max-soft-limit 50%1
+TEST ! $CLI_1 snapshot config snap-max-soft-limit 0111
+TEST ! $CLI_1 snapshot config snap-max-hard-limit OXA
+TEST ! $CLI_1 snapshot config snap-max-hard-limit 11.11
+TEST ! $CLI_1 snapshot config snap-max-soft-limit 50%
+TEST ! $CLI_1 snapshot config snap-max-hard-limit -100
+TEST ! $CLI_1 snapshot config snap-max-soft-limit -90
+
+# Soft limit cannot be assigned to volume
+TEST ! $CLI_1 snapshot config $V0 snap-max-soft-limit 10
+
+# Valid case
+TEST $CLI_1 snapshot config snap-max-soft-limit 50
+TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 10
+
+# Validating auto-delete feature
+# Make sure auto-delete is disabled by default
+EXPECT 'disable' snap_config CLI_1 'auto-delete'
+
+# Test for invalid value for auto-delete
+TEST ! $CLI_1 snapshot config auto-delete test
+
+TEST $CLI_1 snapshot config snap-max-hard-limit 6
+TEST $CLI_1 snapshot config snap-max-soft-limit 50
+
+# Create 4 snapshots
+snap_index=1
+snap_count=4
+TEST snap_create CLI_1 $V0 $snap_index $snap_count
+
+# If auto-delete is disabled then oldest snapshot
+# should not be deleted automatically.
+EXPECT '4' get_snap_count CLI_1;
+
+TEST snap_delete CLI_1 $snap_index $snap_count;
+
+# After all those 4 snaps are deleted, there will not be any snaps present
+EXPECT '0' get_snap_count CLI_1;
+
+TEST $CLI_1 snapshot config auto-delete enable
+
+# auto-delete is already enabled, hence expect a failure.
+TEST ! $CLI_1 snapshot config auto-delete on
+
+# Testing other boolean values with auto-delete
+TEST $CLI_1 snapshot config auto-delete off
+EXPECT 'off' snap_config CLI_1 'auto-delete'
+
+TEST $CLI_1 snapshot config auto-delete true
+EXPECT 'true' snap_config CLI_1 'auto-delete'
+
+# Try to create 4 snaps again. As auto-delete is enabled, the
+# oldest snap should be deleted and the snap count should be 3
+
+TEST snap_create CLI_1 $V0 $snap_index $snap_count;
+EXPECT '3' get_snap_count CLI_1;
+
+TEST $CLI_1 snapshot config auto-delete disable
+EXPECT 'disable' snap_config CLI_1 'auto-delete'
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/bug-1089668.t b/tests/bugs/glusterd/bug-1089668.t
new file mode 100755
index 00000000000..f2b99bf6051
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1089668.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+cleanup
+
+#This script checks that the command 'gluster volume rebalance status' will not
+#show any output when the user has done only remove-brick start, and the command
+#'gluster volume remove-brick status' will not show
+#any output when the user has triggered only rebalance start.
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0
+
+TEST $CLI volume rebalance $V0 start
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 status
+
+TEST $CLI volume rebalance $V0 stop
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+TEST ! $CLI volume rebalance $V0 status
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1090042.t b/tests/bugs/glusterd/bug-1090042.t
new file mode 100755
index 00000000000..b4df8e6cebe
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1090042.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+TEST glusterd;
+
+TEST $CLI volume create $V0 replica 3 $H0:$L1 $H0:$L2 $H0:$L3;
+TEST $CLI volume start $V0;
+
+TEST kill_brick $V0 $H0 $L1;
+
+#Normal snap create should fail
+TEST ! $CLI snapshot create ${V0}_snap1 $V0;
+TEST ! snapshot_exists 0 ${V0}_snap1;
+
+#Force snap create should succeed
+TEST $CLI snapshot create ${V0}_snap1 $V0 force;
+TEST snapshot_exists 0 ${V0}_snap1;
+
+#Delete the created snap
+TEST $CLI snapshot delete ${V0}_snap1;
+TEST ! 
snapshot_exists 0 ${V0}_snap1; + +cleanup; diff --git a/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t b/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t new file mode 100755 index 00000000000..01cc5b56097 --- /dev/null +++ b/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t @@ -0,0 +1,27 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +cli1=$(echo $CLI | sed 's/ --wignore//') + +# Creating volume with non resolvable host name +TEST ! $cli1 volume create $V0 replica 2 $H0:$B0/${V0}0 redhat:$B0/${V0}1 \ + $H0:$B0/${V0}2 redhat:$B0/${V0}3 + +# Creating distribute-replica volume with bad brick order. It will fail +# due to bad brick order. +TEST ! $cli1 volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \ + $H0:$B0/${V0}2 $H0:$B0/${V0}3 + +# Now with force at the end of command it will bypass brick-order check +# for replicate or distribute-replicate volume. and it will create volume +TEST $cli1 volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \ + $H0:$B0/${V0}2 $H0:$B0/${V0}3 force + +cleanup; diff --git a/tests/bugs/glusterd/bug-1092841.t b/tests/bugs/glusterd/bug-1092841.t new file mode 100644 index 00000000000..d3dcf07fd02 --- /dev/null +++ b/tests/bugs/glusterd/bug-1092841.t @@ -0,0 +1,24 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; + +TEST $CLI volume start $V0; + +TEST $CLI volume barrier $V0 enable; + +TEST ! $CLI volume barrier $V0 enable; + +TEST $CLI volume barrier $V0 disable; + +TEST ! $CLI volume barrier $V0 disable; + +cleanup diff --git a/tests/bugs/glusterd/bug-1095097.t b/tests/bugs/glusterd/bug-1095097.t new file mode 100755 index 00000000000..0fe29f06630 --- /dev/null +++ b/tests/bugs/glusterd/bug-1095097.t @@ -0,0 +1,21 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B1/brick1; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST $CLI volume profile $V0 start +TEST $CLI volume profile $V0 info +TEST $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick2 start +TEST $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick2 status + +cleanup; diff --git a/tests/bugs/glusterd/bug-1102656.t b/tests/bugs/glusterd/bug-1102656.t new file mode 100644 index 00000000000..e80f4930a63 --- /dev/null +++ b/tests/bugs/glusterd/bug-1102656.t @@ -0,0 +1,20 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1 +TEST $CLI volume start $V0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; + +TEST $CLI volume top $V0 open +TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick +TEST $CLI volume top $V0 read + +TEST $CLI volume status +TEST $CLI volume stop $V0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Stopped' volinfo_field $V0 'Status'; +cleanup; diff --git a/tests/bugs/glusterd/bug-1104642.t b/tests/bugs/glusterd/bug-1104642.t new file mode 100644 index 00000000000..a45a617d235 --- /dev/null +++ b/tests/bugs/glusterd/bug-1104642.t @@ -0,0 +1,47 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc +. $(dirname $0)/../../cluster.rc + + +function get_value() +{ + local key=$1 + local var="CLI_$2" + + eval cli_index=\$$var + + $cli_index volume info | grep "^$key"\ + | sed 's/.*: //' +} + +cleanup + +TEST launch_cluster 2 + +TEST $CLI_1 peer probe $H2; +EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count + +TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1 +EXPECT "$V0" get_value 'Volume Name' 1 +EXPECT "Created" get_value 'Status' 1 + +TEST $CLI_1 volume start $V0 +EXPECT "Started" get_value 'Status' 1 + +#Bring down 2nd glusterd +TEST kill_glusterd 2 + +#set the volume all options from the 1st glusterd +TEST $CLI_1 volume set all cluster.server-quorum-ratio 80 + +#Bring back the 2nd glusterd +TEST $glusterd_2 + +#Verify whether the value has been synced +EXPECT '80' get_value 'cluster.server-quorum-ratio' 1 +EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count +EXPECT '80' get_value 'cluster.server-quorum-ratio' 2 + +cleanup; diff --git a/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t b/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t new file mode 100644 index 00000000000..561b90740fa --- /dev/null +++ b/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t @@ -0,0 +1,50 @@ +#! /bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../cluster.rc + +# The test will attempt to verify that management handshake requests to +# GlusterD are authenticated before being allowed to change a GlusterD's +# op-version +# +# 1. Launch 3 glusterds +# 2. Probe 2 of them to form a cluster. This should succeed. +# 3. Probe either of the first two GlusterD's from the 3rd GlusterD. This should fail. +# 4. a. Reduce the op-version of 3rd GlusterD and restart it. +# b. Probe either of the first two GlusterD's from the 3rd GlusterD. This should fail. +# 5. Check current op-version of first two GlusterDs. It shouldn't have changed. +# 6. Probe third GlusterD from the cluster. This should succeed. + + +cleanup + +TEST launch_cluster 3 + +TEST $CLI_1 peer probe $H2 + +TEST ! $CLI_3 peer probe $H1 + +GD1_WD=$($CLI_1 system getwd) +OP_VERS_ORIG=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2) + +TEST $CLI_3 system uuid get # Needed for glusterd.info to be created + +GD3_WD=$($CLI_3 system getwd) +TEST sed -rnie "'s/(operating-version=)\w+/\130600/gip'" ${GD3_WD}/glusterd.info + +TEST kill_glusterd 3 +TEST start_glusterd 3 + +TEST ! $CLI_3 peer probe $H1 + +OP_VERS_NEW=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2) +TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]] + +TEST $CLI_1 peer probe $H3 + +kill_node 1 +kill_node 2 +kill_node 3 + +cleanup; + diff --git a/tests/bugs/glusterd/bug-1109770.t b/tests/bugs/glusterd/bug-1109770.t new file mode 100644 index 00000000000..eca4969f2e3 --- /dev/null +++ b/tests/bugs/glusterd/bug-1109770.t @@ -0,0 +1,65 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../snapshot.rc +. $(dirname $0)/../../fileio.rc +. 
$(dirname $0)/../../nfs.rc + +cleanup; + +TEST init_n_bricks 3; +TEST setup_lvm 3; + +TEST glusterd; + +TEST pidof glusterd; + +TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3; + +TEST $CLI volume start $V0; + +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; + +for i in {1..10} ; do echo "file" > $M0/file$i ; done + +TEST $CLI snapshot create snap1 $V0; + +for i in {11..20} ; do echo "file" > $M0/file$i ; done + +TEST $CLI snapshot create snap2 $V0; + +mkdir $M0/dir1; +mkdir $M0/dir2; + +for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done +for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done + +TEST $CLI snapshot create snap3 $V0; + +for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done +for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done + +TEST $CLI snapshot create snap4 $V0; + +TEST $CLI volume set $V0 features.uss enable; + +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist + +TEST $CLI volume set $V0 features.uss disable; + +SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}'); + +TEST ! [ $SNAPD_PID -gt 0 ]; + +TEST $CLI volume set $V0 features.uss enable; + +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist + +TEST $CLI volume stop $V0; + +SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}'); + +TEST ! [ $SNAPD_PID -gt 0 ]; + +cleanup ; diff --git a/tests/bugs/glusterd/bug-1109889.t b/tests/bugs/glusterd/bug-1109889.t new file mode 100644 index 00000000000..eac5ac17f5b --- /dev/null +++ b/tests/bugs/glusterd/bug-1109889.t @@ -0,0 +1,74 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../snapshot.rc +. $(dirname $0)/../../fileio.rc +. $(dirname $0)/../../nfs.rc + +cleanup; + +TEST init_n_bricks 3; +TEST setup_lvm 3; + +TEST glusterd; + +TEST pidof glusterd; + +TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3; + +TEST $CLI volume start $V0; + +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; + +MOUNT_PID=`ps ax |grep "glusterfs --volfile-sever $H0 --volfile-id=$V0 $M0" | grep -v grep | awk '{print $1}' | head -1` + +for i in {1..10} ; do echo "file" > $M0/file$i ; done + +TEST $CLI snapshot config activate-on-create enable + +TEST $CLI snapshot create snap1 $V0; + +for i in {11..20} ; do echo "file" > $M0/file$i ; done + +TEST $CLI snapshot create snap2 $V0; + +mkdir $M0/dir1; +mkdir $M0/dir2; + +for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done +for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done + +TEST $CLI snapshot create snap3 $V0; + +for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done +for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done + +TEST $CLI snapshot create snap4 $V0; + +TEST $CLI volume set $V0 features.uss enable; + +#let snapd get started properly and client connect to snapd +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0 + +SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}'); + +TEST [ $SNAPD_PID -gt 0 ]; + +TEST stat $M0/.snaps; + +kill -KILL $SNAPD_PID; + +# let snapd die properly +EXPECT_WITHIN $CHILD_UP_TIMEOUT "0" snap_client_connected_status $V0 + +TEST ! 
stat $M0/.snaps; + +TEST $CLI volume start $V0 force; + +# let client get the snapd port from glusterd and connect +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0 + +TEST stat $M0/.snaps; + +cleanup; diff --git a/tests/bugs/glusterd/bug-1111041.t b/tests/bugs/glusterd/bug-1111041.t new file mode 100644 index 00000000000..9e72c50990d --- /dev/null +++ b/tests/bugs/glusterd/bug-1111041.t @@ -0,0 +1,36 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../fileio.rc +. $(dirname $0)/../../nfs.rc + +cleanup; + +function is_snapd_running { + $CLI volume status $1 | grep "Snapshot Daemon" | wc -l; +} + +TEST glusterd; + +TEST pidof glusterd; + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 + +TEST $CLI volume start $V0; + +EXPECT "0" is_snapd_running $v0 + +TEST $CLI volume set $V0 features.uss enable; + +EXPECT "1" is_snapd_running $V0 + +SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}'); + +TEST [ $SNAPD_PID -gt 0 ]; + +SNAPD_PID2=$($CLI volume status $V0 | grep "Snapshot Daemon" | awk {'print $7'}); + +TEST [ $SNAPD_PID -eq $SNAPD_PID2 ] + +cleanup ; diff --git a/tests/bugs/glusterd/bug-1112559.t b/tests/bugs/glusterd/bug-1112559.t new file mode 100755 index 00000000000..f318db61b8a --- /dev/null +++ b/tests/bugs/glusterd/bug-1112559.t @@ -0,0 +1,61 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../cluster.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../snapshot.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +function check_snaps_status { + $CLI_1 snapshot status | grep 'Snap Name : ' | wc -l +} + +function check_snaps_bricks_health { + $CLI_1 snapshot status | grep 'Brick Running : Yes' | wc -l +} + + +SNAP_COMMAND_TIMEOUT=40 +NUMBER_OF_BRICKS=2 + +cleanup; +TEST verify_lvm_version +TEST launch_cluster 3 +TEST setup_lvm 3 + +TEST $CLI_1 peer probe $H2 +EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count + +TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2 + +TEST $CLI_1 volume start $V0 + +#Create snapshot and add a peer together +$CLI_1 snapshot create ${V0}_snap1 ${V0} & +PID_1=$! +$CLI_1 peer probe $H3 +wait $PID_1 + +#Snapshot should be created and in the snaplist +TEST snapshot_exists 1 ${V0}_snap1 + +#Not being paranoid! Just checking for the status of the snapshot +#During the testing of the bug the snapshot would list but actually +#not be created.Therefore check for health of the snapshot +EXPECT_WITHIN $SNAP_COMMAND_TIMEOUT 1 check_snaps_status + +#Disabling the checking of snap brick status , Will continue investigation +#on the failure of the snapbrick port bind issue. +#EXPECT_WITHIN $SNAP_COMMAND_TIMEOUT $NUMBER_OF_BRICKS check_snaps_bricks_health + +#check if the peer is added successfully +EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count + +TEST $CLI_1 snapshot delete ${V0}_snap1 + +cleanup; + + diff --git a/tests/bugs/glusterd/bug-1112613.t b/tests/bugs/glusterd/bug-1112613.t new file mode 100644 index 00000000000..e566de056bc --- /dev/null +++ b/tests/bugs/glusterd/bug-1112613.t @@ -0,0 +1,49 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../snapshot.rc +. 
$(dirname $0)/../../cluster.rc + +cleanup; + +V1="patchy2" + +TEST verify_lvm_version; +TEST launch_cluster 2 +TEST setup_lvm 2 + +TEST $CLI_1 peer probe $H2 +EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count + +TEST $CLI_1 volume create $V0 $H1:$L1 +TEST $CLI_1 volume start $V0 +TEST $CLI_1 volume create $V1 $H2:$L2 +TEST $CLI_1 volume start $V1 + +# Create 3 snapshots for volume $V0 +snap_count=3 +snap_index=1 +TEST snap_create CLI_1 $V0 $snap_index $snap_count; + +# Create 3 snapshots for volume $V1 +snap_count=4 +snap_index=11 +TEST snap_create CLI_1 $V1 $snap_index $snap_count; + +EXPECT '3' get_snap_count CLI_1 $V0; +EXPECT '4' get_snap_count CLI_1 $V1; +EXPECT '7' get_snap_count CLI_1 + +TEST $CLI_1 snapshot delete volume $V0 +EXPECT '0' get_snap_count CLI_1 $V0; +EXPECT '4' get_snap_count CLI_1 $V1; +EXPECT '4' get_snap_count CLI_1 + +TEST $CLI_1 snapshot delete all +EXPECT '0' get_snap_count CLI_1 $V0; +EXPECT '0' get_snap_count CLI_1 $V1; +EXPECT '0' get_snap_count CLI_1 + +cleanup; + diff --git a/tests/bugs/glusterd/bug-1113975.t b/tests/bugs/glusterd/bug-1113975.t new file mode 100644 index 00000000000..c1b9b1e3e2c --- /dev/null +++ b/tests/bugs/glusterd/bug-1113975.t @@ -0,0 +1,38 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../snapshot.rc + +cleanup; + +TEST init_n_bricks 3; +TEST setup_lvm 3; + +TEST glusterd; + +TEST pidof glusterd; + +TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3; + +TEST $CLI volume start $V0; + +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; + +for i in {1..10} ; do echo "file" > $M0/file$i ; done + +TEST $CLI snapshot create snap1 $V0; + +for i in {11..20} ; do echo "file" > $M0/file$i ; done + +TEST $CLI snapshot create snap2 $V0; + +TEST $CLI volume stop $V0 + +TEST $CLI snapshot restore snap1; + +TEST $CLI snapshot restore snap2; + +TEST $CLI volume start $V0 + +cleanup ; diff --git a/tests/bugs/glusterd/bug-1120647.t b/tests/bugs/glusterd/bug-1120647.t new file mode 100644 index 00000000000..0223f460398 --- /dev/null +++ b/tests/bugs/glusterd/bug-1120647.t @@ -0,0 +1,17 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{1..4} +TEST $CLI volume start $V0 +TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} start +EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/brick{3..4}" +TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} commit +TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/brick2 force + +cleanup; diff --git a/tests/bugs/glusterd/bug-1140162-file-snapshot-and-features-encryption-option-validation.t b/tests/bugs/glusterd/bug-1140162-file-snapshot-and-features-encryption-option-validation.t new file mode 100644 index 00000000000..f91093db4e7 --- /dev/null +++ b/tests/bugs/glusterd/bug-1140162-file-snapshot-and-features-encryption-option-validation.t @@ -0,0 +1,33 @@ +#!/bin/bash + +## Test case for BZ-1140160 Volume option set and +## command input should validate correctly. + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +## Start glusterd +TEST glusterd; +TEST pidof glusterd; + +## Lets create and start volume +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; +TEST $CLI volume start $V0 + +## Set features.file-snapshot and features.encryption option with non-boolean +## value. These options should fail. +TEST ! $CLI volume set $V0 features.file-snapshot abcd +TEST ! 
$CLI volume set $V0 features.encryption redhat + +## Set other options with valid value. These options should succeed. +TEST $CLI volume set $V0 barrier enable +TEST $CLI volume set $V0 ping-timeout 60 + +## Set features.file-snapshot and features.encryption option with valid boolean +## value. These options should succeed. +TEST $CLI volume set $V0 features.file-snapshot on +TEST $CLI volume set $V0 features.encryption on + +cleanup; diff --git a/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t b/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t new file mode 100755 index 00000000000..5a6cf81fd53 --- /dev/null +++ b/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t @@ -0,0 +1,34 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../cluster.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +cleanup; + +TEST launch_cluster 2; +TEST $CLI_1 peer probe $H2; + +EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers + +TEST $CLI_1 volume create $V0 $H1:$B1/$V0 +TEST $CLI_1 volume create $V1 $H1:$B1/$V1 +TEST $CLI_1 volume start $V0 +TEST $CLI_1 volume start $V1 + +for i in {1..20} +do + $CLI_1 volume set $V0 diagnostics.client-log-level DEBUG & + $CLI_1 volume set $V1 barrier on + $CLI_2 volume set $V0 diagnostics.client-log-level DEBUG & + $CLI_2 volume set $V1 barrier on +done + +EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers +TEST $CLI_1 volume status +TEST $CLI_2 volume status + +cleanup; diff --git a/tests/bugs/glusterd/bug-765230.t b/tests/bugs/glusterd/bug-765230.t new file mode 100755 index 00000000000..e0b9608d728 --- /dev/null +++ b/tests/bugs/glusterd/bug-765230.t @@ -0,0 +1,60 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting quota-timeout as 20 +TEST ! $CLI volume set $V0 features.quota-timeout 20 +EXPECT '' volinfo_field $V0 'features.quota-timeout'; + +## Enabling features.quota-deem-statfs +TEST ! $CLI volume set $V0 features.quota-deem-statfs on +EXPECT '' volinfo_field $V0 'features.quota-deem-statfs' + +## Enabling quota +TEST $CLI volume quota $V0 enable +EXPECT 'on' volinfo_field $V0 'features.quota' + +## Setting quota-timeout as 20 +TEST $CLI volume set $V0 features.quota-timeout 20 +EXPECT '20' volinfo_field $V0 'features.quota-timeout'; + +## Enabling features.quota-deem-statfs +TEST $CLI volume set $V0 features.quota-deem-statfs on +EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs' + +## Disabling quota +TEST $CLI volume quota $V0 disable +EXPECT 'off' volinfo_field $V0 'features.quota' + +## Setting quota-timeout as 30 +TEST ! $CLI volume set $V0 features.quota-timeout 30 +EXPECT '20' volinfo_field $V0 'features.quota-timeout'; + +## Disabling features.quota-deem-statfs +TEST ! $CLI volume set $V0 features.quota-deem-statfs off +EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs' + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! 
$CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-782095.t b/tests/bugs/glusterd/bug-782095.t
new file mode 100755
index 00000000000..dd8a8dc3026
--- /dev/null
+++ b/tests/bugs/glusterd/bug-782095.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting performance cache min size as 2MB
+TEST $CLI volume set $V0 performance.cache-min-file-size 2MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## Setting performance cache max size as 20MB
+TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## Trying to set performance cache min size as 25MB
+TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## Able to set performance cache min size as long as it is less than max size
+TEST $CLI volume set $V0 performance.cache-min-file-size 15MB
+EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## Trying it out with only cache-max-file-size in CLI as 10MB
+TEST ! $CLI volume set $V0 cache-max-file-size 10MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-824753-file-locker.c b/tests/bugs/glusterd/bug-824753-file-locker.c
new file mode 100644
index 00000000000..ea8a7630e81
--- /dev/null
+++ b/tests/bugs/glusterd/bug-824753-file-locker.c
@@ -0,0 +1,42 @@
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+int main (int argc, char *argv[])
+{
+        int fd = -1;
+        int ret = -1;
+        char command[2048] = "";
+        char filepath[255] = "";
+        struct flock fl;
+
+        fl.l_type = F_WRLCK;
+        fl.l_whence = SEEK_SET;
+        fl.l_start = 7;
+        fl.l_len = 1;
+        fl.l_pid = getpid();
+
+        snprintf(filepath, 255, "%s/%s", argv[4], argv[5]);
+
+        fd = open(filepath, O_RDWR);
+
+        if (fd == -1)
+                return -1;
+
+        if (fcntl(fd, F_SETLKW, &fl) == -1) {
+                return -1;
+        }
+
+        snprintf(command, sizeof(command),
+                 "gluster volume clear-locks %s /%s kind all posix 0,7-1 |"
+                 " grep %s | awk -F'..: ' '{print $1}' | grep %s:%s/%s",
+                 argv[1], argv[5], argv[2], argv[2], argv[3], argv[1]);
+
+        ret = system (command);
+        close(fd);
+
+        if (ret)
+                return -1;
+        else
+                return 0;
+}
diff --git a/tests/bugs/glusterd/bug-824753.t b/tests/bugs/glusterd/bug-824753.t
new file mode 100755
index 00000000000..2ce4a07c5bd
--- /dev/null
+++ b/tests/bugs/glusterd/bug-824753.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. 
$(dirname $0)/../../include.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST glusterfs -s $H0 --volfile-id=$V0 $M0 +touch $M0/file1; + +TEST $CC -g $(dirname $0)/bug-824753-file-locker.c -o $(dirname $0)/file-locker + +TEST $(dirname $0)/file-locker $V0 $H0 $B0 $M0 file1 + +## Finish up +TEST rm -f $(dirname $0)/file-locker +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterd/bug-839595.t b/tests/bugs/glusterd/bug-839595.t new file mode 100644 index 00000000000..b2fe9789a8c --- /dev/null +++ b/tests/bugs/glusterd/bug-839595.t @@ -0,0 +1,31 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}1 +TEST $CLI volume set $V0 cluster.server-quorum-type server +EXPECT "server" volume_option $V0 cluster.server-quorum-type +TEST $CLI volume set $V0 cluster.server-quorum-type none +EXPECT "none" volume_option $V0 cluster.server-quorum-type +TEST $CLI volume reset $V0 cluster.server-quorum-type +TEST ! $CLI volume set $V0 cluster.server-quorum-type abc +TEST ! $CLI volume set all cluster.server-quorum-type none +TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100 + +TEST ! $CLI volume set all cluster.server-quorum-ratio abc +TEST ! $CLI volume set all cluster.server-quorum-ratio -1 +TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005 +TEST $CLI volume set all cluster.server-quorum-ratio 0 +EXPECT "0" volume_option $V0 cluster.server-quorum-ratio +TEST $CLI volume set all cluster.server-quorum-ratio 100 +EXPECT "100" volume_option $V0 cluster.server-quorum-ratio +TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005 +EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio +TEST $CLI volume set all cluster.server-quorum-ratio 100% +EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio +cleanup; diff --git a/tests/bugs/glusterd/bug-857330/common.rc b/tests/bugs/glusterd/bug-857330/common.rc new file mode 100644 index 00000000000..8342dccb442 --- /dev/null +++ b/tests/bugs/glusterd/bug-857330/common.rc @@ -0,0 +1,55 @@ +. 
$(dirname $0)/../../../include.rc + +UUID_REGEX='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}' + +TASK_ID="" +COMMAND="" +PATTERN="" + +function check-and-store-task-id() +{ + TASK_ID="" + + local task_id=$($CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX") + + if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then + return 1 + fi + + TASK_ID=$task_id + return 0; +} + +function get-task-id() +{ + $CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX" | tail -n1 + +} + +function check-and-store-task-id-xml() +{ + TASK_ID="" + + local task_id=$($CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX") + + if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then + return 1 + fi + + TASK_ID=$task_id + return 0; +} + +function get-task-id-xml() +{ + $CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX" +} + +function get-task-status() +{ + $CLI $COMMAND | grep -o $PATTERN + if [ ${PIPESTATUS[0]} -ne 0 ]; then + return 1 + fi + return 0 +} diff --git a/tests/bugs/glusterd/bug-857330/normal.t b/tests/bugs/glusterd/bug-857330/normal.t new file mode 100755 index 00000000000..02018f244a8 --- /dev/null +++ b/tests/bugs/glusterd/bug-857330/normal.t @@ -0,0 +1,79 @@ +#!/bin/bash + +. $(dirname $0)/common.rc +. $(dirname $0)/../../../volume.rc +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/${V0}1; +TEST $CLI volume info $V0; +TEST $CLI volume start $V0; + +TEST glusterfs -s $H0 --volfile-id=$V0 $M0; + +TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \ + --multi -b 10 -d 10 -n 10 $M0; + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +############### +## Rebalance ## +############### +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2; + +COMMAND="volume rebalance $V0 start" +PATTERN="ID:" +TEST check-and-store-task-id + +COMMAND="volume status $V0" +PATTERN="ID" +EXPECT $TASK_ID get-task-id + +COMMAND="volume rebalance $V0 status" +PATTERN="completed" +EXPECT_WITHIN 300 $PATTERN get-task-status + +################### +## Replace-brick ## +################### +REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}3" + +COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR start" +PATTERN="ID:" +TEST check-and-store-task-id + +COMMAND="volume status $V0" +PATTERN="ID" +EXPECT $TASK_ID get-task-id + +COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status" +PATTERN="complete" +EXPECT_WITHIN 300 $PATTERN get-task-status + +TEST $CLI volume replace-brick $V0 $REP_BRICK_PAIR commit; + +################## +## Remove-brick ## +################## +COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 start" +PATTERN="ID:" +TEST check-and-store-task-id + +COMMAND="volume status $V0" +PATTERN="ID" +EXPECT $TASK_ID get-task-id + +COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status" +PATTERN="completed" +EXPECT_WITHIN 300 $PATTERN get-task-status + +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 commit + +TEST $CLI volume stop $V0; +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterd/bug-857330/xml.t b/tests/bugs/glusterd/bug-857330/xml.t new file mode 100755 index 00000000000..3aec3b89bbe --- /dev/null +++ b/tests/bugs/glusterd/bug-857330/xml.t @@ -0,0 +1,103 @@ +#!/bin/bash + +. $(dirname $0)/common.rc +. 
$(dirname $0)/../../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/${V0}1; +TEST $CLI volume info $V0; +TEST $CLI volume start $V0; + +TEST glusterfs -s $H0 --volfile-id=$V0 $M0; + +TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \ + --multi -b 10 -d 10 -n 10 $M0; + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + + +############### +## Rebalance ## +############### +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2; + +COMMAND="volume rebalance $V0 start" +PATTERN="task-id" +TEST check-and-store-task-id-xml + +COMMAND="volume status $V0" +PATTERN="id" +EXPECT $TASK_ID get-task-id-xml + +COMMAND="volume rebalance $V0 status" +PATTERN="task-id" +EXPECT $TASK_ID get-task-id-xml + +## TODO: Add tests for rebalance stop + +COMMAND="volume rebalance $V0 status" +PATTERN="completed" +EXPECT_WITHIN 300 $PATTERN get-task-status + +################### +## Replace-brick ## +################### +REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}3" + +COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR start" +PATTERN="task-id" +TEST check-and-store-task-id-xml + +COMMAND="volume status $V0" +PATTERN="id" +EXPECT $TASK_ID get-task-id-xml + +COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status" +PATTERN="task-id" +EXPECT $TASK_ID get-task-id-xml + +## TODO: Add more tests for replace-brick pause|abort + +COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status" +PATTERN="complete" +EXPECT_WITHIN 300 $PATTERN get-task-status + +COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR commit" +PATTERN="task-id" +EXPECT $TASK_ID get-task-id-xml + +################## +## Remove-brick ## +################## +COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 start" +PATTERN="task-id" +TEST check-and-store-task-id-xml + +COMMAND="volume status $V0" +PATTERN="id" +EXPECT $TASK_ID get-task-id-xml + +COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status" +PATTERN="task-id" +EXPECT $TASK_ID get-task-id-xml + +COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status" +PATTERN="completed" +EXPECT_WITHIN 300 $PATTERN get-task-status + +## TODO: Add tests for remove-brick stop + +COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 commit" +PATTERN="task-id" +EXPECT $TASK_ID get-task-id-xml + +TEST $CLI volume stop $V0; +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterd/bug-859927.t b/tests/bugs/glusterd/bug-859927.t new file mode 100755 index 00000000000..c30d2b852d4 --- /dev/null +++ b/tests/bugs/glusterd/bug-859927.t @@ -0,0 +1,70 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +cleanup; + +glusterd; + +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +TEST ! $CLI volume set $V0 statedump-path "" +TEST ! $CLI volume set $V0 statedump-path " " +TEST $CLI volume set $V0 statedump-path "/home/" +EXPECT "/home/" volume_option $V0 server.statedump-path + +TEST ! $CLI volume set $V0 background-self-heal-count "" +TEST ! $CLI volume set $V0 background-self-heal-count " " +TEST $CLI volume set $V0 background-self-heal-count 10 +EXPECT "10" volume_option $V0 cluster.background-self-heal-count + +TEST ! $CLI volume set $V0 cache-size "" +TEST ! $CLI volume set $V0 cache-size " " +TEST $CLI volume set $V0 cache-size 512MB +EXPECT "512MB" volume_option $V0 performance.cache-size + +TEST ! $CLI volume set $V0 self-heal-daemon "" +TEST ! 
$CLI volume set $V0 self-heal-daemon " " +TEST $CLI volume set $V0 self-heal-daemon on +EXPECT "on" volume_option $V0 cluster.self-heal-daemon + +TEST ! $CLI volume set $V0 read-subvolume "" +TEST ! $CLI volume set $V0 read-subvolume " " +TEST $CLI volume set $V0 read-subvolume $V0-client-0 +EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume + +TEST ! $CLI volume set $V0 data-self-heal-algorithm "" +TEST ! $CLI volume set $V0 data-self-heal-algorithm " " +TEST ! $CLI volume set $V0 data-self-heal-algorithm on +TEST $CLI volume set $V0 data-self-heal-algorithm full +EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm + +TEST ! $CLI volume set $V0 min-free-inodes "" +TEST ! $CLI volume set $V0 min-free-inodes " " +TEST $CLI volume set $V0 min-free-inodes 60% +EXPECT "60%" volume_option $V0 cluster.min-free-inodes + +TEST ! $CLI volume set $V0 min-free-disk "" +TEST ! $CLI volume set $V0 min-free-disk " " +TEST $CLI volume set $V0 min-free-disk 60% +EXPECT "60%" volume_option $V0 cluster.min-free-disk + +TEST $CLI volume set $V0 min-free-disk 120 +EXPECT "120" volume_option $V0 cluster.min-free-disk + +TEST ! $CLI volume set $V0 frame-timeout "" +TEST ! $CLI volume set $V0 frame-timeout " " +TEST $CLI volume set $V0 frame-timeout 0 +EXPECT "0" volume_option $V0 network.frame-timeout + +TEST ! $CLI volume set $V0 auth.allow "" +TEST ! $CLI volume set $V0 auth.allow " " +TEST $CLI volume set $V0 auth.allow 192.168.122.1 +EXPECT "192.168.122.1" volume_option $V0 auth.allow + +TEST ! $CLI volume set $V0 stripe-block-size "" +TEST ! $CLI volume set $V0 stripe-block-size " " +TEST $CLI volume set $V0 stripe-block-size 512MB +EXPECT "512MB" volume_option $V0 cluster.stripe-block-size + +cleanup; diff --git a/tests/bugs/glusterd/bug-862834.t b/tests/bugs/glusterd/bug-862834.t new file mode 100755 index 00000000000..ac2f956a1ed --- /dev/null +++ b/tests/bugs/glusterd/bug-862834.t @@ -0,0 +1,46 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +V1="patchy2" +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; + +function check_brick() +{ + vol=$1; + num=$2 + $CLI volume info $V0 | grep "Brick$num" | awk '{print $2}'; +} + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + +function brick_count() +{ + local vol=$1; + + $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l; +} + +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT '2' brick_count $V0 + + +EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1'; +EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2'; + +TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1; + +cleanup; diff --git a/tests/bugs/glusterd/bug-878004.t b/tests/bugs/glusterd/bug-878004.t new file mode 100644 index 00000000000..8abada3c3b3 --- /dev/null +++ b/tests/bugs/glusterd/bug-878004.t @@ -0,0 +1,29 @@ +#!/bin/bash + +. 
$(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3; + +function brick_count() +{ + local vol=$1; + + $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l; +} + + +TEST $CLI volume start $V0 +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force; +EXPECT '2' brick_count $V0 + +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 force; +EXPECT '1' brick_count $V0 + +cleanup; + diff --git a/tests/bugs/glusterd/bug-888752.t b/tests/bugs/glusterd/bug-888752.t new file mode 100644 index 00000000000..ed0602e34e2 --- /dev/null +++ b/tests/bugs/glusterd/bug-888752.t @@ -0,0 +1,24 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../cluster.rc + +# Check if xml output is generated correctly for volume status for a single brick +# present on another peer and no async tasks are running. + +function get_peer_count { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} +cleanup + +TEST launch_cluster 2; +TEST $CLI_1 peer probe $H2; +EXPECT_WITHIN $PROBE_TIMEOUT 1 get_peer_count +TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 +TEST $CLI_1 volume start $V0 + +TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml + +TEST $CLI_1 volume stop $V0 + +cleanup diff --git a/tests/bugs/glusterd/bug-889630.t b/tests/bugs/glusterd/bug-889630.t new file mode 100755 index 00000000000..4fefd94d66f --- /dev/null +++ b/tests/bugs/glusterd/bug-889630.t @@ -0,0 +1,56 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../cluster.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +function volume_count { + local cli=$1; + if [ $cli -eq '1' ] ; then + $CLI_1 volume info | grep 'Volume Name' | wc -l; + else + $CLI_2 volume info | grep 'Volume Name' | wc -l; + fi +} + +cleanup; + +TEST launch_cluster 2; +TEST $CLI_1 peer probe $H2; + +EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers + +TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 +TEST $CLI_1 volume start $V0 + +b="B1"; + +#Create an extra file in the originator's volume store +touch ${!b}/glusterd/vols/$V0/run/file + +TEST $CLI_1 volume stop $V0 +#Test for self-commit failure +TEST $CLI_1 volume delete $V0 + +#Check whether delete succeeded on both the nodes +EXPECT "0" volume_count '1' +EXPECT "0" volume_count '2' + +#Check whether the volume name can be reused after deletion +TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1 +TEST $CLI_1 volume start $V0 + +#Create an extra file in the peer's volume store +touch ${!b}/glusterd/vols/$V0/run/file + +TEST $CLI_1 volume stop $V0 +#Test for commit failure on the other node +TEST $CLI_2 volume delete $V0 + +EXPECT "0" volume_count '1'; +EXPECT "0" volume_count '2'; + +cleanup; diff --git a/tests/bugs/glusterd/bug-905307.t b/tests/bugs/glusterd/bug-905307.t new file mode 100644 index 00000000000..dd1c1bc0795 --- /dev/null +++ b/tests/bugs/glusterd/bug-905307.t @@ -0,0 +1,36 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; +TEST glusterd +TEST pidof glusterd + +#test functionality of post-op-delay-secs +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} + +#Strings should not be accepted. +TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc + +#-ve ints should not be accepted. +TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1 + +#INT_MAX+1 should not be accepted. +TEST ! 
$CLI volume set $V0 cluster.post-op-delay-secs 2147483648 + +#floats should not be accepted. +TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25 + +#min val 0 should be accepted +TEST $CLI volume set $V0 cluster.post-op-delay-secs 0 +EXPECT "0" volume_option $V0 cluster.post-op-delay-secs + +#max val 2147483647 should be accepted +TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647 +EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs + +#some middle val in range 2147 should be accepted +TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147 +EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs +cleanup; diff --git a/tests/bugs/glusterd/bug-913487.t b/tests/bugs/glusterd/bug-913487.t new file mode 100644 index 00000000000..9c616ea28fb --- /dev/null +++ b/tests/bugs/glusterd/bug-913487.t @@ -0,0 +1,14 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd; +TEST pidof glusterd; + +TEST ! $CLI volume set $V0 performance.open-behind off; + +TEST pidof glusterd; + +cleanup; diff --git a/tests/bugs/glusterd/bug-913555.t b/tests/bugs/glusterd/bug-913555.t new file mode 100755 index 00000000000..4f9e004a654 --- /dev/null +++ b/tests/bugs/glusterd/bug-913555.t @@ -0,0 +1,54 @@ +#!/bin/bash + +# Test that a volume becomes unwritable when the cluster loses quorum. + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../cluster.rc + + +function check_fs { + df $1 &> /dev/null + echo $? +} + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +function glusterfsd_count { + pidof glusterfsd | wc -w; +} + +cleanup; + +TEST launch_cluster 3; # start 3-node virtual cluster +TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli +TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli + +EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers + +TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0 +TEST $CLI_1 volume set $V0 cluster.server-quorum-type server +TEST $CLI_1 volume start $V0 +TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0 + +# Kill one pseudo-node, make sure the others survive and volume stays up. +TEST kill_node 3; +EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; +EXPECT 0 check_fs $M0; +EXPECT 2 glusterfsd_count; + +# Kill another pseudo-node, make sure the last one dies and volume goes down. +TEST kill_node 2; +EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers +EXPECT 1 check_fs $M0; +EXPECT 0 glusterfsd_count; # the two glusterfsds of the other two glusterds + # must be dead + +TEST $glusterd_2; +TEST $glusterd_3; +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 glusterfsd_count; # restore quorum, all ok +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0; + +cleanup diff --git a/tests/bugs/glusterd/bug-916549.t b/tests/bugs/glusterd/bug-916549.t new file mode 100755 index 00000000000..bedbdd60bb6 --- /dev/null +++ b/tests/bugs/glusterd/bug-916549.t @@ -0,0 +1,19 @@ +#!/bin/bash + +. 
$(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd; +TEST $CLI volume create $V0 $H0:$B0/${V0}1; +TEST $CLI volume start $V0; + +pid_file=$(ls $GLUSTERD_WORKDIR/vols/$V0/run); +brick_pid=$(cat $GLUSTERD_WORKDIR/vols/$V0/run/$pid_file); + + +kill -SIGKILL $brick_pid; +TEST $CLI volume start $V0 force; +TEST process_leak_count $(pidof glusterd); + +cleanup; diff --git a/tests/bugs/glusterd/bug-948686.t b/tests/bugs/glusterd/bug-948686.t new file mode 100755 index 00000000000..dfe11ff153f --- /dev/null +++ b/tests/bugs/glusterd/bug-948686.t @@ -0,0 +1,46 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../cluster.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} +cleanup; +#setup cluster and test volume +TEST launch_cluster 3; # start 3-node virtual cluster +TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli +TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli + +EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers; + +TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0 +TEST $CLI_1 volume start $V0 +TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0 + +#kill a node +TEST kill_node 3 + +#modify volume config to see change in volume-sync +TEST $CLI_1 volume set $V0 write-behind off +#add some files to the volume to see effect of volume-heal cmd +TEST touch $M0/{1..100}; +TEST $CLI_1 volume stop $V0; +TEST $glusterd_3; +EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers; +TEST $CLI_3 volume start $V0; +TEST $CLI_2 volume stop $V0; +TEST $CLI_2 volume delete $V0; + +cleanup; + +TEST glusterd; +TEST $CLI volume create $V0 $H0:$B0/$V0 +TEST $CLI volume start $V0 +pkill glusterd; +pkill glusterfsd; +TEST glusterd +TEST $CLI volume status $V0 + +cleanup; diff --git a/tests/bugs/glusterd/bug-948729/bug-948729-force.t b/tests/bugs/glusterd/bug-948729/bug-948729-force.t new file mode 100644 index 00000000000..f4f71f9a1e2 --- /dev/null +++ b/tests/bugs/glusterd/bug-948729/bug-948729-force.t @@ -0,0 +1,103 @@ +#!/bin/bash + +. $(dirname $0)/../../../include.rc +. $(dirname $0)/../../../volume.rc +. $(dirname $0)/../../../cluster.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +cleanup; +uuid1=`uuidgen`; +uuid2=`uuidgen`; +uuid3=`uuidgen`; + +V1=patchy1 +V2=patchy2 + +TEST launch_cluster 2; + +TEST $CLI_1 peer probe $H2; + +EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; + +B3=/d/backends/3 +B4=/d/backends/4 +B5=/d/backends/5 +B6=/d/backends/6 + +mkdir -p $B3 $B4 $B5 $B6 + +TEST truncate -s 16M $B1/brick1 +TEST truncate -s 16M $B2/brick2 +TEST truncate -s 16M $B3/brick3 +TEST truncate -s 16M $B4/brick4 +TEST truncate -s 16M $B5/brick5 +TEST truncate -s 16M $B6/brick6 + +TEST LD1=`SETUP_LOOP $B1/brick1` +TEST MKFS_LOOP $LD1 +TEST LD2=`SETUP_LOOP $B2/brick2` +TEST MKFS_LOOP $LD2 +TEST LD3=`SETUP_LOOP $B3/brick3` +TEST MKFS_LOOP $LD3 +TEST LD4=`SETUP_LOOP $B4/brick4` +TEST MKFS_LOOP $LD4 +TEST LD5=`SETUP_LOOP $B5/brick5` +TEST MKFS_LOOP $LD5 +TEST LD6=`SETUP_LOOP $B6/brick6` +TEST MKFS_LOOP $LD6 + +mkdir -p $B1/$V0 $B2/$V0 $B3/$V0 $B4/$V0 $B5/$V0 $B6/$V0 + +TEST MOUNT_LOOP $LD1 $B1/$V0 +TEST MOUNT_LOOP $LD2 $B2/$V0 +TEST MOUNT_LOOP $LD3 $B3/$V0 +TEST MOUNT_LOOP $LD4 $B4/$V0 +TEST MOUNT_LOOP $LD5 $B5/$V0 +TEST MOUNT_LOOP $LD6 $B6/$V0 + +#Case 0: Parent directory of the brick is absent +TEST ! 
$CLI1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 force + +#Case 1: File system root is being used as brick directory +TEST $CLI1 volume create $V0 $H1:$B5/$V0 $H2:$B6/$V0 force + +#Case 2: Brick directory contains only one component +TEST $CLI1 volume create $V1 $H1:/$uuid1 $H2:/$uuid2 force + +#Case 3: Sub-directories of the backend FS being used as brick directory +TEST $CLI1 volume create $V2 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 force + +#add-brick tests +TEST ! $CLI1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3 force +TEST $CLI1 volume add-brick $V0 $H1:$B3/$V0 force +TEST $CLI1 volume add-brick $V1 $H1:/$uuid3 force +TEST $CLI1 volume add-brick $V2 $H1:$B4/$V0/brick3 force + +#####replace-brick tests +#FIX-ME: replace-brick does not work with the newly introduced cluster test +#####framework + +rmdir /$uuid1 /$uuid2 /$uuid3; + +$CLI volume stop $V0 +$CLI volume stop $V1 +$CLI volume stop $V2 + +UMOUNT_LOOP $B1/$V0 +UMOUNT_LOOP $B2/$V0 +UMOUNT_LOOP $B3/$V0 +UMOUNT_LOOP $B4/$V0 +UMOUNT_LOOP $B5/$V0 +UMOUNT_LOOP $B6/$V0 + +rm -f $B1/brick1 +rm -f $B2/brick2 +rm -f $B3/brick3 +rm -f $B4/brick4 +rm -f $B5/brick5 +rm -f $B6/brick6 + +cleanup; diff --git a/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t b/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t new file mode 100644 index 00000000000..18bf9a1c4b6 --- /dev/null +++ b/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t @@ -0,0 +1,77 @@ +#!/bin/bash + +. $(dirname $0)/../../../include.rc +. $(dirname $0)/../../../volume.rc +. $(dirname $0)/../../../cluster.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +cleanup; + +uuid1=`uuidgen`; +uuid2=`uuidgen`; +uuid3=`uuidgen`; + +TEST launch_cluster 2; + +TEST $CLI_1 peer probe $H2; + +EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; + +B3=/d/backends/3 +mkdir -p $B3 + +TEST truncate -s 16M $B1/brick1 +TEST truncate -s 16M $B2/brick2 +TEST truncate -s 16M $B3/brick3 + +TEST LD1=`SETUP_LOOP $B1/brick1` +TEST MKFS_LOOP $LD1 +TEST LD2=`SETUP_LOOP $B2/brick2` +TEST MKFS_LOOP $LD2 +TEST LD3=`SETUP_LOOP $B3/brick3` +TEST MKFS_LOOP $LD3 + +mkdir -p $B1/$V0 $B2/$V0 $B3/$V0 + +TEST MOUNT_LOOP $LD1 $B1/$V0 +TEST MOUNT_LOOP $LD2 $B2/$V0 +TEST MOUNT_LOOP $LD3 $B3/$V0 + +cli1=$(echo $CLI1 | sed 's/ --wignore//') + +#Case 0: Parent directory of the brick is absent +TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 + +#Case 1: File system root being used as brick directory +TEST ! $cli1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 + +#Case 2: Brick directory contains only one component +TEST ! $cli1 volume create $V0 $H1:/$uuid1 $H2:/$uuid2 + +#Case 3: Sub-directories of the backend FS being used as brick directory +TEST $cli1 volume create $V0 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 + +#add-brick tests +TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3 +TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0 +TEST ! 
$cli1 volume add-brick $V0 $H1:/$uuid3 +TEST $cli1 volume add-brick $V0 $H1:$B3/$V0/brick3 + +#####replace-brick tests +#FIX-ME : replace-brick does not currently work in the newly introduced +#####cluster test framework + +$CLI1 volume stop $V0 + +UMOUNT_LOOP $B1/$V0 +UMOUNT_LOOP $B2/$V0 +UMOUNT_LOOP $B3/$V0 + +rm -f $B1/brick1 +rm -f $B2/brick2 +rm -f $B3/brick3 + +cleanup; diff --git a/tests/bugs/glusterd/bug-948729/bug-948729.t b/tests/bugs/glusterd/bug-948729/bug-948729.t new file mode 100644 index 00000000000..2b574aa1a14 --- /dev/null +++ b/tests/bugs/glusterd/bug-948729/bug-948729.t @@ -0,0 +1,80 @@ +#!/bin/bash + +. $(dirname $0)/../../../include.rc +. $(dirname $0)/../../../volume.rc +. $(dirname $0)/../../../cluster.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +cleanup; + +uuid1=`uuidgen`; +uuid2=`uuidgen`; +uuid3=`uuidgen`; + +TEST launch_cluster 2; + +TEST $CLI_1 peer probe $H2; + +EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers; + +B3=/d/backends/3 + +mkdir -p $B3 + +TEST truncate -s 16M $B1/brick1 +TEST truncate -s 16M $B2/brick2 +TEST truncate -s 16M $B3/brick3 + +TEST LD1=`SETUP_LOOP $B1/brick1` +TEST MKFS_LOOP $LD1 +TEST LD2=`SETUP_LOOP $B2/brick2` +TEST MKFS_LOOP $LD2 +TEST LD3=`SETUP_LOOP $B3/brick3` +TEST MKFS_LOOP $LD3 + +mkdir -p $B1/$V0 $B2/$V0 $B3/$V0 + +TEST MOUNT_LOOP $LD1 $B1/$V0 +TEST MOUNT_LOOP $LD2 $B2/$V0 +TEST MOUNT_LOOP $LD3 $B3/$V0 + +#Tests without options 'mode=script' and 'wignore' +cli1=$(echo $CLI1 | sed 's/ --mode=script//') +cli1=$(echo $cli1 | sed 's/ --wignore//') +#Case 0: Parent directory of the brick is absent +TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 + +#Case 1: File system root being used as brick directory +TEST ! $cli1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 + +#Case 2: Brick directory contains only one component +TEST ! $cli1 volume create $V0 $H1:/$uuid1 $H2:/$uuid2 + +#Case 3: Sub-directories of the backend FS being used as brick directory +TEST $cli1 volume create $V0 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 + +#add-brick tests +TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/b3 +TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0 +TEST ! $cli1 volume add-brick $V0 $H1:/$uuid3 +TEST $cli1 volume add-brick $V0 $H1:$B3/$V0/brick3 + +#####replace-brick tests +#FIX-ME: Replace-brick does not work currently in the newly introduced cluster +#####test framework. + +$CLI1 volume stop $V0 + +UMOUNT_LOOP $B1/$V0 +UMOUNT_LOOP $B2/$V0 +UMOUNT_LOOP $B3/$V0 + +rm -f $B1/brick1 +rm -f $B2/brick2 +rm -f $B3/brick3 + + +cleanup; diff --git a/tests/bugs/glusterd/bug-949930.t b/tests/bugs/glusterd/bug-949930.t new file mode 100644 index 00000000000..774802a66b2 --- /dev/null +++ b/tests/bugs/glusterd/bug-949930.t @@ -0,0 +1,27 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +V1=patchy2 + +cleanup; + +TEST glusterd; +TEST pidof glusterd; + +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; +TEST $CLI volume start $V0; + +TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2}; +TEST $CLI volume start $V1; + +TEST ! 
$CLI volume set $V0 performance.nfs.read-ahead blah +EXPECT '' volume_option $V0 performance.nfs.read-ahead + +TEST $CLI volume set $V0 performance.nfs.read-ahead on +EXPECT "on" volume_option $V0 performance.nfs.read-ahead + +EXPECT '' volume_option $V1 performance.nfs.read-ahead + +cleanup; + diff --git a/tests/bugs/glusterd/bug-955588.t b/tests/bugs/glusterd/bug-955588.t new file mode 100755 index 00000000000..028a34edd7d --- /dev/null +++ b/tests/bugs/glusterd/bug-955588.t @@ -0,0 +1,27 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; +TEST glusterd +TEST pidof glusterd + +function get_brick_host_uuid() +{ + local vol=$1; + local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}' + local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex"); + + echo $host_uuid_list | awk '{print $1}' +} + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} + +uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=` +EXPECT $uuid get_brick_host_uuid $V0 + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterd/bug-958790.t b/tests/bugs/glusterd/bug-958790.t new file mode 100644 index 00000000000..39be0a19137 --- /dev/null +++ b/tests/bugs/glusterd/bug-958790.t @@ -0,0 +1,21 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +touch $GLUSTERD_WORKDIR/groups/test +echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test +echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test + +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; +TEST $CLI volume set $V0 group test +EXPECT "off" volume_option $V0 performance.read-ahead +EXPECT "off" volume_option $V0 performance.open-behind + +cleanup; diff --git a/tests/bugs/glusterd/bug-961669.t b/tests/bugs/glusterd/bug-961669.t new file mode 100644 index 00000000000..b02f2f50af1 --- /dev/null +++ b/tests/bugs/glusterd/bug-961669.t @@ -0,0 +1,48 @@ +#!/bin/bash + +#Test case: Fail remove-brick 'start' variant when reducing the replica count of a volume. + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +#Basic checks +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info + +#Create a 3x3 dist-rep volume +TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5,6,7,8}; +TEST $CLI volume start $V0 + +# Mount FUSE and create file/directory +TEST glusterfs -s $H0 --volfile-id $V0 $M0 +TEST touch $M0/zerobytefile.txt +TEST mkdir $M0/test_dir +TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024 + +function remove_brick_start { + $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} start 2>&1|grep -oE 'success|failed' +} + +function remove_brick { + $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} force 2>&1|grep -oE 'success|failed' +} + +#remove-brick start variant +#Actual message displayed at cli is: +#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option" +EXPECT "failed" remove_brick_start; + +#remove-brick commit-force +#Actual message displayed at cli is: +#"volume remove-brick commit force: success" +EXPECT "success" remove_brick + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0; +TEST ! 
$CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterd/bug-963541.t b/tests/bugs/glusterd/bug-963541.t new file mode 100755 index 00000000000..611626a0d10 --- /dev/null +++ b/tests/bugs/glusterd/bug-963541.t @@ -0,0 +1,33 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}; +TEST $CLI volume start $V0; + +# Start a remove-brick and try to start a rebalance/remove-brick without committing +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start + +TEST ! $CLI volume rebalance $V0 start +TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start + +#Try to start rebalance/remove-brick again after commit +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit + +gluster volume status + +TEST $CLI volume rebalance $V0 start +TEST $CLI volume rebalance $V0 stop + +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start +TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop + +TEST $CLI volume stop $V0 + +cleanup; + diff --git a/tests/bugs/glusterd/bug-964059.t b/tests/bugs/glusterd/bug-964059.t new file mode 100755 index 00000000000..7b4f60454b8 --- /dev/null +++ b/tests/bugs/glusterd/bug-964059.t @@ -0,0 +1,30 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../cluster.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +function volume_count { + local cli=$1; + if [ $cli -eq '1' ] ; then + $CLI_1 volume info | grep 'Volume Name' | wc -l; + else + $CLI_2 volume info | grep 'Volume Name' | wc -l; + fi +} + +cleanup; + +TEST launch_cluster 2; +TEST $CLI_1 peer probe $H2; + +EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers + +TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 +TEST $CLI_1 volume start $V0 +TEST $CLI_1 volume remove-brick $V0 $H2:$B2/$V0 start +TEST $CLI_1 volume status +cleanup; diff --git a/tests/bugs/glusterd/bug-974007.t b/tests/bugs/glusterd/bug-974007.t new file mode 100644 index 00000000000..5759adb583f --- /dev/null +++ b/tests/bugs/glusterd/bug-974007.t @@ -0,0 +1,52 @@ +#!/bin/bash + +#Test case: Create a distributed replicate volume, and remove multiple +#replica pairs in a single remove-brick command. + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +#Basic checks +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info + +#Create a 3X2 distributed-replicate volume +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6}; +TEST $CLI volume start $V0 + +# Mount FUSE and create files +TEST glusterfs -s $H0 --volfile-id $V0 $M0 +TEST touch $M0/file{1..10} + +# Remove bricks from two sub-volumes to make it a 1x2 vol. +# Bricks in question are given in a random order but from the same subvols. 
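A note on how the brick list below maps onto sub-volumes: for a replica-2 volume, bricks are grouped into replica sets in the order they were passed to 'volume create', so ${V0}{1,2}, ${V0}{3,4} and ${V0}{5,6} form the three sub-volumes here, and removing ${V0}6 ${V0}1 ${V0}2 ${V0}5 drops the first and third sets no matter how the bricks are ordered on the command line. A minimal sketch of that grouping, assuming only this ordering rule (the helper name below is made up and is not part of the test framework):

# Illustrative only: print which bricks form each replica set, assuming
# consecutive bricks on the command line make up one set.
print_replica_sets () {
    local replica=$1; shift
    local bricks=("$@")
    local i
    for ((i = 0; i < ${#bricks[@]}; i += replica)); do
        echo "replica set $((i / replica + 1)): ${bricks[@]:i:replica}"
    done
}
# Example: print_replica_sets 2 ${V0}1 ${V0}2 ${V0}3 ${V0}4 ${V0}5 ${V0}6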
+function remove_brick_start_status { + $CLI volume remove-brick $V0 \ + $H0:$B0/${V0}6 $H0:$B0/${V0}1 \ + $H0:$B0/${V0}2 $H0:$B0/${V0}5 start 2>&1 |grep -oE "success|failed" +} +EXPECT "success" remove_brick_start_status; + +# Wait for rebalance to complete +EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/${V0}6 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}5" + +# Check commit status +function remove_brick_commit_status { + $CLI volume remove-brick $V0 \ + $H0:$B0/${V0}6 $H0:$B0/${V0}1 \ + $H0:$B0/${V0}2 $H0:$B0/${V0}5 commit 2>&1 |grep -oE "success|failed" +} +EXPECT "success" remove_brick_commit_status; + +# Check the volume type +EXPECT "Replicate" echo `$CLI volume info |grep Type |awk '{print $2}'` + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterfs-server/bug-852147.t b/tests/bugs/glusterfs-server/bug-852147.t new file mode 100755 index 00000000000..8cb5fd13f85 --- /dev/null +++ b/tests/bugs/glusterfs-server/bug-852147.t @@ -0,0 +1,85 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; +logdir=`gluster --print-logdir`"/bricks" + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST glusterfs -s $H0 --volfile-id=$V0 $M0 +touch $M0/file1; + +TEST $CLI volume set $V0 performance.cache-max-file-size 20MB +TEST $CLI volume set $V0 performance.cache-min-file-size 10MB + +EXPECT "20MB" volinfo_field $V0 'performance.cache-max-file-size'; +EXPECT "10MB" volinfo_field $V0 'performance.cache-min-file-size'; + +#Performing volume reset and verifying. +TEST $CLI volume reset $V0 +EXPECT "" volinfo_field $V0 'performance.cache-max-file-size'; +EXPECT "" volinfo_field $V0 'performance.cache-min-file-size'; + +#Verifying vlolume-profile start, info and stop +EXPECT "Starting volume profile on $V0 has been successful " $CLI volume profile $V0 start + +function vol_prof_info() +{ + $CLI volume profile $V0 info | grep Brick | wc -l +} +EXPECT "8" vol_prof_info + +EXPECT "Stopping volume profile on $V0 has been successful " $CLI volume profile $V0 stop + +function log-file-name() +{ + logfilename=$B0"/"$V0"1.log" + echo ${logfilename:1} | tr / - +} + +function file-size() +{ + ls -lrt $1 | awk '{print $5}' +} + +#Finding the current log file's size +log_file=$logdir"/"`log-file-name` +log_file_size=`file-size $log_file` + +#Removing the old backup log files +ren_file=$log_file".*" +rm -rf $ren_file + +#Initiating log rotate +TEST $CLI volume log rotate $V0 + +#Capturing new log file's size +new_file_size=`file-size $log_file` + +#Verifying the size of the new log file and the creation of the backup log file +TEST ! [ $new_file_size -eq $log_file_size ] +TEST ls -lrt $ren_file + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! 
$CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterfs-server/bug-861542.t b/tests/bugs/glusterfs-server/bug-861542.t new file mode 100755 index 00000000000..ab572963bb0 --- /dev/null +++ b/tests/bugs/glusterfs-server/bug-861542.t @@ -0,0 +1,50 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; +# Distributed volume with a single brick was chosen solely for the ease of +#implementing the test case (to be precise, for the ease of extracting the port number). +TEST $CLI volume create $V0 $H0:$B0/brick0; + +TEST $CLI volume start $V0; + +function port_field() +{ +    local vol=$1; +    local opt=$2; +    if [ $opt -eq '0' ]; then +        $CLI volume status $vol | grep "brick0" | awk '{print $3}'; +    else +        $CLI volume status $vol detail | grep "^Port " | awk '{print $3}'; +    fi +} + +function xml_port_field() +{ +    local vol=$1; +    local opt=$2; +    $CLI --xml volume status $vol $opt | tr -d '\n' |\ +#Find the first occurrence of the string between <port> and </port> +    sed -rn 's/<port>/&###/;s/<\/port>/###&/;s/^.*###(.*)###.*$/\1/p' +} + +TEST $CLI volume status $V0; +TEST $CLI volume status $V0 detail; +TEST $CLI --xml volume status $V0; +TEST $CLI --xml volume status $V0 detail; + +# Kill the brick process. After this, port number for the killed (in this case brick) process must be "N/A". +kill `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-brick0.pid` + +EXPECT "N/A" port_field $V0 '0'; # volume status +EXPECT "N/A" port_field $V0 '1'; # volume status detail + +EXPECT "N/A" xml_port_field $V0 ''; +EXPECT "N/A" xml_port_field $V0 'detail'; + +cleanup; diff --git a/tests/bugs/glusterfs-server/bug-864222.t b/tests/bugs/glusterfs-server/bug-864222.t new file mode 100755 index 00000000000..cbda7d27f38 --- /dev/null +++ b/tests/bugs/glusterfs-server/bug-864222.t @@ -0,0 +1,27 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/brick0 +TEST $CLI volume start $V0 + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 nolock +cd $N0 + +TEST ls + +TEST $CLI volume set $V0 nfs.enable-ino32 on +# Main test. This should pass. +TEST ls + +cd +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 +cleanup + diff --git a/tests/bugs/glusterfs-server/bug-873549.t b/tests/bugs/glusterfs-server/bug-873549.t new file mode 100644 index 00000000000..a3b2f9c9bf7 --- /dev/null +++ b/tests/bugs/glusterfs-server/bug-873549.t @@ -0,0 +1,17 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd -LDEBUG; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; + +TEST $CLI volume set $V0 performance.cache-size 512MB +TEST $CLI volume start $V0 +TEST $CLI volume statedump $V0 all + +cleanup; diff --git a/tests/bugs/glusterfs-server/bug-877992.t b/tests/bugs/glusterfs-server/bug-877992.t new file mode 100755 index 00000000000..c0287e7594a --- /dev/null +++ b/tests/bugs/glusterfs-server/bug-877992.t @@ -0,0 +1,61 @@ +#!/bin/bash + +. 
$(dirname $0)/../../include.rc + +cleanup; + + +## Start and create a volume +TEST glusterd -LDEBUG +TEST pidof glusterd + + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + + +function hooks_prep () +{ + local event=$1 + touch /tmp/pre.out /tmp/post.out + touch $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh + touch $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh + + printf "#! /bin/bash\necho "$event"Pre > /tmp/pre.out\n" > $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh + printf "#! /bin/bash\necho "$event"Post > /tmp/post.out\n" > $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh + chmod a+x $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh + chmod a+x $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh +} + +function hooks_cleanup () +{ + local event=$1 + rm /tmp/pre.out /tmp/post.out + rm $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh + rm $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh +} + +## Verify volume is created and its hooks script ran +hooks_prep 'create' +TEST $CLI volume create $V0 $H0:$B0/${V0}1; +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT 'createPre' cat /tmp/pre.out; +EXPECT 'createPost' cat /tmp/post.out; +hooks_cleanup 'create' + + +## Start volume and verify that its hooks script ran +hooks_prep 'start' +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; +EXPECT 'startPre' cat /tmp/pre.out; +EXPECT 'startPost' cat /tmp/post.out; +hooks_cleanup 'start' + +cleanup; diff --git a/tests/bugs/glusterfs-server/bug-887145.t b/tests/bugs/glusterfs-server/bug-887145.t new file mode 100755 index 00000000000..35e1c928390 --- /dev/null +++ b/tests/bugs/glusterfs-server/bug-887145.t @@ -0,0 +1,88 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; +TEST $CLI volume set $V0 performance.open-behind off; +TEST $CLI volume start $V0 + +## Mount FUSE with caching disabled +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; + + +useradd tmp_user 2>/dev/null 1>/dev/null; +mkdir $M0/dir; +mkdir $M0/other; +cp /etc/passwd $M0/; +cp $M0/passwd $M0/file; +chmod 600 $M0/file; + +TEST mount_nfs $H0:/$V0 $N0 nolock; + +chown -R nfsnobody:nfsnobody $M0/dir; +chown -R tmp_user:tmp_user $M0/other; + +TEST $CLI volume set $V0 server.root-squash on; + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; + +# create files and directories in the root of the glusterfs and nfs mount +# which is owned by root and hence the right behavior is getting EACCESS +# as the fops are executed as nfsnobody. +touch $M0/foo 2>/dev/null; +TEST [ $? -ne 0 ] +touch $N0/foo 2>/dev/null; +TEST [ $? -ne 0 ] +mkdir $M0/new 2>/dev/null; +TEST [ $? -ne 0 ] +mkdir $N0/new 2>/dev/null; +TEST [ $? -ne 0 ] +cp $M0/file $M0/tmp_file 2>/dev/null; +TEST [ $? -ne 0 ] +cp $N0/file $N0/tmp_file 2>/dev/null; +TEST [ $? -ne 0 ] +cat $M0/file 2>/dev/null; +TEST [ $? -ne 0 ] +# here read should be allowed because eventhough file "passwd" is owned +# by root, the permissions if the file allow other users to read it. +cat $M0/passwd 1>/dev/null; +TEST [ $? -eq 0 ] +cat $N0/passwd 1>/dev/null; +TEST [ $? 
-eq 0 ] + +# create files and directories should succeed as the fops are being executed +# inside the directory owned by nfsnobody +TEST touch $M0/dir/file; +TEST touch $N0/dir/foo; +TEST mkdir $M0/dir/new; +TEST mkdir $N0/dir/other; +TEST rm -f $M0/dir/file $M0/dir/foo; +TEST rmdir $N0/dir/*; + +# create files and directories here should fail as other directory is owned +# by tmp_user. +touch $M0/other/foo 2>/dev/null; +TEST [ $? -ne 0 ] +touch $N0/other/foo 2>/dev/null; +TEST [ $? -ne 0 ] +mkdir $M0/other/new 2>/dev/null; +TEST [ $? -ne 0 ] +mkdir $N0/other/new 2>/dev/null; +TEST [ $? -ne 0 ] + +userdel tmp_user; +rm -rf /home/tmp_user; + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +TEST $CLI volume stop $V0; +TEST $CLI volume delete $V0; + +cleanup; diff --git a/tests/bugs/glusterfs-server/bug-889996.t b/tests/bugs/glusterfs-server/bug-889996.t new file mode 100644 index 00000000000..d7d25c42933 --- /dev/null +++ b/tests/bugs/glusterfs-server/bug-889996.t @@ -0,0 +1,19 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; + +rm -rf $B0/${V0}1; + +TEST ! $CLI volume start $V0; +EXPECT 0 online_brick_count; + +cleanup; diff --git a/tests/bugs/glusterfs-server/bug-904300.t b/tests/bugs/glusterfs-server/bug-904300.t new file mode 100755 index 00000000000..8ce805cfcdd --- /dev/null +++ b/tests/bugs/glusterfs-server/bug-904300.t @@ -0,0 +1,62 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +# 1-8 +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/$V0; +TEST $CLI volume start $V0 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available + +TEST mount_nfs $H0:/$V0 $N0 nolock +TEST mkdir $N0/dir1 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +# +# Case 1: Allow "dir1" to be mounted only from 127.0.0.1 +# 9-12 +TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.1)"\" +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available + +TEST mount_nfs localhost:/$V0/dir1 $N0 nolock +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +# +# Case 2: Allow "dir1" to be mounted only from 8.8.8.8. This is +# a negative test case therefore the mount should fail. +# 13-16 +TEST $CLI volume set $V0 export-dir \""/dir1(8.8.8.8)"\" +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available + +TEST ! mount_nfs $H0:/$V0/dir1 $N0 nolock +TEST ! umount $N0 + + +# Case 3: Variation of test case1. Here we are checking with hostname +# instead of ip address. +# 17-20 +TEST $CLI volume set $V0 export-dir \""/dir1($H0)"\" +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available + +TEST mount_nfs $H0:/$V0/dir1 $N0 nolock +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +# Case 4: Variation of test case1. 
Here we are checking with IP range +# 21-24 +TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.0/24)"\" +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available + +TEST mount_nfs localhost:/$V0/dir1 $N0 nolock +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +## Finish up +TEST $CLI volume stop $V0; +TEST $CLI volume delete $V0; + +cleanup; diff --git a/tests/bugs/glusterfs-server/bug-905864.c b/tests/bugs/glusterfs-server/bug-905864.c new file mode 100644 index 00000000000..3cc4cc5d232 --- /dev/null +++ b/tests/bugs/glusterfs-server/bug-905864.c @@ -0,0 +1,82 @@ +#include +#include +#include +#include +#include + + +pthread_t th[5] = {0}; +void +flock_init (struct flock *f, short int type, off_t start, off_t len) +{ + f->l_type = type; + f->l_start = start; + f->l_len = len; +} + +int +flock_range_in_steps (int fd, int is_set, short l_type, + int start, int end, int step) +{ + int ret = 0; + int i = 0; + struct flock f = {0,}; + + for (i = start; i+step < end; i += step) { + flock_init (&f, l_type, i, step); + ret = fcntl (fd, (is_set) ? F_SETLKW : F_GETLK, &f); + if (ret) { + perror ("fcntl"); + goto out; + } + } +out: + return ret; +} + +void * +random_locker (void *arg) +{ + int fd = *(int *)arg; + int i = 0; + int is_set = 0; + + /* use thread id to choose GETLK or SETLK operation*/ + is_set = pthread_self () % 2; + (void)flock_range_in_steps (fd, is_set, F_WRLCK, 0, 400, 1); + + return NULL; +} + + +int main (int argc, char **argv) +{ + int fd = -1; + int ret = 1; + int i = 0; + char *fname = NULL; + + if (argc < 2) + goto out; + + fname = argv[1]; + fd = open (fname, O_RDWR); + if (fd == -1) { + perror ("open"); + goto out; + } + + ret = flock_range_in_steps (fd, 1, F_WRLCK, 0, 2000, 2); + for (i = 0; i < 5; i++) { + pthread_create (&th[i], NULL, random_locker, (void *) &fd); + } + ret = flock_range_in_steps (fd, 1, F_WRLCK, 0, 2000, 2); + for (i = 0; i < 5; i++) { + pthread_join (th[i], NULL); + } +out: + if (fd != -1) + close (fd); + + return ret; +} diff --git a/tests/bugs/glusterfs-server/bug-905864.t b/tests/bugs/glusterfs-server/bug-905864.t new file mode 100644 index 00000000000..44923a85333 --- /dev/null +++ b/tests/bugs/glusterfs-server/bug-905864.t @@ -0,0 +1,32 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; +TEST $CLI volume start $V0; + +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M1; + +TEST touch $M0/file1; + +#following C program tries open up race(s) if any, in F_GETLK/F_SETLKW codepaths +#of locks xlator +TEST $CC -pthread -g3 $(dirname $0)/bug-905864.c -o $(dirname $0)/bug-905864 + +$(dirname $0)/bug-905864 $M0/file1 & +$(dirname $0)/bug-905864 $M1/file1; +wait + +TEST rm -f $(dirname $0)/bug-905864 +EXPECT $(brick_count $V0) online_brick_count + +cleanup diff --git a/tests/bugs/glusterfs-server/bug-912297.t b/tests/bugs/glusterfs-server/bug-912297.t new file mode 100755 index 00000000000..f1f4147e6aa --- /dev/null +++ b/tests/bugs/glusterfs-server/bug-912297.t @@ -0,0 +1,44 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting owner-uid as -12 +TEST ! $CLI volume set $V0 owner-uid -12 +EXPECT '' volinfo_field $V0 'storage.owner-uid' + +## Setting owner-gid as -5 +TEST ! $CLI volume set $V0 owner-gid -5 +EXPECT '' volinfo_field $V0 'storage.owner-gid' + +## Setting owner-uid as 36 +TEST $CLI volume set $V0 owner-uid 36 +EXPECT '36' volinfo_field $V0 'storage.owner-uid' + +## Setting owner-gid as 36 +TEST $CLI volume set $V0 owner-gid 36 +EXPECT '36' volinfo_field $V0 'storage.owner-gid' + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterfs/bug-811493.t b/tests/bugs/glusterfs/bug-811493.t new file mode 100755 index 00000000000..98f7c121a02 --- /dev/null +++ b/tests/bugs/glusterfs/bug-811493.t @@ -0,0 +1,18 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI system uuid reset; + +uuid1=$(grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f 2 -d "="); + +TEST $CLI system uuid reset; +uuid2=$(grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f 2 -d "="); + +TEST [ $uuid1 != $uuid2 ] + +cleanup diff --git a/tests/bugs/glusterfs/bug-844688.t b/tests/bugs/glusterfs/bug-844688.t new file mode 100755 index 00000000000..a1b0b15f5ed --- /dev/null +++ b/tests/bugs/glusterfs/bug-844688.t @@ -0,0 +1,34 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/brick0 +TEST $CLI volume start $V0 +TEST glusterfs -s $H0 --volfile-id $V0 $M0 + +mount_pid=$(get_mount_process_pid $V0); +# enable dumping of call stack creation and frame creation times in statedump +kill -USR2 $mount_pid; + +TEST touch $M0/touchfile; +(dd if=/dev/urandom of=$M0/file bs=5k 2>/dev/null 1>/dev/null)& +back_pid=$!; +statedump_file=$(generate_mount_statedump $V0); +grep "callstack-creation-time" $statedump_file 2>/dev/null 1>/dev/null; +TEST [ $? -eq 0 ]; +grep "frame-creation-time" $statedump_file 2>/dev/null 1>/dev/null; +TEST [ $? -eq 0 ]; + +kill -SIGTERM $back_pid; +wait >/dev/null 2>&1; + +TEST rm -f $M0/touchfile $M0/file; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +rm -f $statedumpdir/glusterdump.$mount_pid.*; +cleanup diff --git a/tests/bugs/glusterfs/bug-848251.t b/tests/bugs/glusterfs/bug-848251.t new file mode 100644 index 00000000000..b44ec9d9bf2 --- /dev/null +++ b/tests/bugs/glusterfs/bug-848251.t @@ -0,0 +1,51 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/brick1; + +TEST $CLI volume start $V0; + +#enable quota +TEST $CLI volume quota $V0 enable; + +#mount on a random dir +TEST MOUNTDIR="/tmp/$RANDOM" +TEST mkdir $MOUNTDIR +TEST glusterfs -s $H0 --volfile-id=$V0 $MOUNTDIR + +function set_quota(){ + mkdir "$MOUNTDIR/$name" + $CLI volume quota $V0 limit-usage /$name 50KB +} + +function quota_list(){ + $CLI volume quota $V0 list | grep -- /$name | awk '{print $3}' +} + +TEST name=":d1" +#file name containing ':' in the start +TEST set_quota +EXPECT "80%" quota_list + +TEST name=":d1/d:1" +#file name containing ':' in between +TEST set_quota +EXPECT "80%" quota_list + +TEST name=":d1/d:1/d1:" +#file name containing ':' in the end +TEST set_quota +EXPECT "80%" quota_list + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR +TEST rm -rf $MOUNTDIR + +cleanup; diff --git a/tests/bugs/glusterfs/bug-853690.t b/tests/bugs/glusterfs/bug-853690.t new file mode 100755 index 00000000000..d81be011438 --- /dev/null +++ b/tests/bugs/glusterfs/bug-853690.t @@ -0,0 +1,91 @@ +#!/bin/bash +# +# Bug 853690 - Test that short writes do not lead to corruption. +# +# Mismanagement of short writes in AFR leads to corruption and immediately +# detectable split-brain. Write a file to a replica volume using error-gen +# to cause short writes on one replica. +# +# Short writes are also possible during heal. If ignored, the files are marked +# consistent and silently differ. After reading the file, cause a lookup, wait +# for self-heal and verify that the afr xattrs do not match. +# +######## + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST mkdir -p $B0/test{1,2} + +# Our graph is a two brick replica with 100% frequency of short writes on one +# side of the replica. This guarantees a single write fop leads to an out-of-sync +# situation. +cat > $B0/test.vol <&1 | grep = | cut -f2 -d=` +EXPECT_NOT 0x000000000000000000000000 echo $xa + +TEST rm -f $M0/file +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +rm -f $B0/test.vol +rm -rf $B0/test1 $B0/test2 + +cleanup; + diff --git a/tests/bugs/glusterfs/bug-856455.t b/tests/bugs/glusterfs/bug-856455.t new file mode 100644 index 00000000000..25a30bfda48 --- /dev/null +++ b/tests/bugs/glusterfs/bug-856455.t @@ -0,0 +1,42 @@ +#!/bin/bash + +. 
$(dirname $0)/../../include.rc + +cleanup; + +BRICK_COUNT=3 + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 +TEST $CLI volume start $V0 + +## Mount FUSE with caching disabled +TEST $GFS -s $H0 --volfile-id $V0 $M0; + +function query_pathinfo() +{ + local path=$1; + local retval; + + local pathinfo=$(getfattr -n trusted.glusterfs.pathinfo $path); + retval=$(echo $pathinfo | grep -o 'POSIX' | wc -l); + echo $retval +} + +TEST touch $M0/f00f; +TEST mkdir $M0/f00d; + +# verify pathinfo for a file and directory +EXPECT 1 query_pathinfo $M0/f00f; +EXPECT $BRICK_COUNT query_pathinfo $M0/f00d; + +# Kill a brick process and then query for pathinfo +# for directories pathinfo should list backend patch from available (up) subvolumes + +kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`; + +EXPECT `expr $BRICK_COUNT - 1` query_pathinfo $M0/f00d; + +cleanup; diff --git a/tests/bugs/glusterfs/bug-860297.t b/tests/bugs/glusterfs/bug-860297.t new file mode 100644 index 00000000000..c2d21553f68 --- /dev/null +++ b/tests/bugs/glusterfs/bug-860297.t @@ -0,0 +1,13 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info +TEST $CLI volume create $V0 $H0:$B0/brick1 +setfattr -x trusted.glusterfs.volume-id $B0/brick1 +## If Extended attribute trusted.glusterfs.volume-id is not present +## then volume should not be able to start +TEST ! $CLI volume start $V0; +cleanup; diff --git a/tests/bugs/glusterfs/bug-861015-index.t b/tests/bugs/glusterfs/bug-861015-index.t new file mode 100644 index 00000000000..05f3e8b1ee0 --- /dev/null +++ b/tests/bugs/glusterfs/bug-861015-index.t @@ -0,0 +1,36 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3,4,5} +TEST $CLI volume set $V0 ensure-durability off +TEST $CLI volume start $V0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +TEST kill_brick $V0 $H0 $B0/${V0}0 +TEST kill_brick $V0 $H0 $B0/${V0}2 +TEST kill_brick $V0 $H0 $B0/${V0}4 +cd $M0 +HEAL_FILES=0 +for i in {1..10} +do + echo "abc" > $i + HEAL_FILES=$(($HEAL_FILES+1)) +done +HEAL_FILES=$(($HEAL_FILES+3)) #count brick root distribute-subvol num of times + +cd ~ +EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0 +TEST rm -f $M0/* +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume heal $V0 info +#Only root dir should be present now in the indices +EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}1 +EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}3 +EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}5 +cleanup diff --git a/tests/bugs/glusterfs/bug-861015-log.t b/tests/bugs/glusterfs/bug-861015-log.t new file mode 100644 index 00000000000..2f3e0ad14f4 --- /dev/null +++ b/tests/bugs/glusterfs/bug-861015-log.t @@ -0,0 +1,29 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +log_wd=$(gluster --print-logdir) +TEST glusterd +TEST pidof glusterd +rm -f $log_wd/glustershd.log +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +TEST kill_brick $V0 $H0 $B0/${V0}0 +cd $M0 +for i in {1..10} +do + dd if=/dev/urandom of=f bs=1024k count=10 2>/dev/null +done + +cd ~ +TEST $CLI volume heal $V0 info +function count_inode_link_failures { + logfile=$1 + grep "inode link failed on the inode" $logfile | wc -l +} +EXPECT "0" count_inode_link_failures $log_wd/glustershd.log +cleanup diff --git a/tests/bugs/glusterfs/bug-866459.t b/tests/bugs/glusterfs/bug-866459.t new file mode 100644 index 00000000000..f90aa3fdc08 --- /dev/null +++ b/tests/bugs/glusterfs/bug-866459.t @@ -0,0 +1,45 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + + +## Start and create a volume +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +## Create and start a volume with aio enabled +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; +TEST $CLI volume set $V0 linux-aio on +TEST $CLI volume set $V0 background-self-heal-count 0 +TEST $CLI volume set $V0 performance.stat-prefetch off; +TEST $CLI volume start $V0 + +## Mount FUSE with caching disabled +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +dd of=$M0/a if=/dev/urandom bs=1024k count=1 2>&1 > /dev/null +B0_hiphenated=`echo $B0 | tr '/' '-'` +## Bring a brick down +TEST kill_brick $V0 $H0 $B0/${V0}1 +EXPECT '1' echo `pgrep glusterfsd | wc -l` +## Rewrite the file +dd of=$M0/a if=/dev/urandom bs=1024k count=1 2>&1 > /dev/null +TEST $CLI volume start $V0 force +## Wait for the brick to give CHILD_UP in client protocol +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +md5offile2=`md5sum $B0/${V0}2/a | awk '{print $1}'` + +##trigger self-heal +ls -l $M0/a + +EXPECT "$md5offile2" echo `md5sum $B0/${V0}1/a | awk '{print $1}'` + +## Finish up +TEST $CLI volume stop $V0; +TEST $CLI volume delete $V0; + +cleanup; diff --git a/tests/bugs/glusterfs/bug-867253.t b/tests/bugs/glusterfs/bug-867253.t new file mode 100644 index 00000000000..3df49a1bd61 --- /dev/null +++ b/tests/bugs/glusterfs/bug-867253.t @@ -0,0 +1,69 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../nfs.rc + +# Skip the entire test if /proc/sys/vm/drop_caches does not exist +if [ ! 
-f /proc/sys/vm/drop_caches ] ; then + echo "Skip test using /proc/sys/vm/drop_caches, "\ + "which does not exists on this system" >&2 + SKIP_TESTS + exit 0 +fi + +cleanup; + +function file_count() +{ + val=1 + + if [ "$1" == "0" ] + then + if [ "$2" == "0" ] + then + val=0 + fi + fi + echo $val +} + +BRICK_COUNT=2 + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 +TEST $CLI volume start $V0 + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +## Mount nfs, with nocache option +TEST mount_nfs $H0:/$V0 $M0 nolock,noac; + +touch $M0/files{1..1000}; + +# Kill a brick process +kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}0.pid`; + +echo 3 >/proc/sys/vm/drop_caches; + +ls -l $M0 >/dev/null; + +NEW_FILE_COUNT=`echo $?`; + +TEST $CLI volume start $V0 force + +# Kill a brick process +kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`; + +echo 3 >/proc/sys/vm/drop_caches; + +ls -l $M0 >/dev/null; + +NEW_FILE_COUNT1=`echo $?`; + +EXPECT "0" file_count $NEW_FILE_COUNT $NEW_FILE_COUNT1 + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +cleanup diff --git a/tests/bugs/glusterfs/bug-869724.t b/tests/bugs/glusterfs/bug-869724.t new file mode 100644 index 00000000000..ca5bb17081c --- /dev/null +++ b/tests/bugs/glusterfs/bug-869724.t @@ -0,0 +1,37 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + + +## Start and create a volume +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}1; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + + +## Make volume tightly consistent for metdata +TEST $CLI volume set $V0 performance.stat-prefetch off; + +## Mount FUSE with caching disabled +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +touch $M0/test; +build_tester $(dirname $0)/getlk_owner.c + +TEST $(dirname $0)/getlk_owner $M0/test; + +rm -f $(dirname $0)/getlk_owner +cleanup; + diff --git a/tests/bugs/glusterfs/bug-872923.t b/tests/bugs/glusterfs/bug-872923.t new file mode 100755 index 00000000000..de24117a037 --- /dev/null +++ b/tests/bugs/glusterfs/bug-872923.t @@ -0,0 +1,56 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../nfs.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1 +TEST $CLI volume start $V0 + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 nolock + +cd $N0 +mkdir test_hardlink_self_heal; +cd test_hardlink_self_heal; + +for i in `seq 1 5`; +do + mkdir dir.$i; + for j in `seq 1 10`; + do + dd if=/dev/zero of=dir.$i/file.$j bs=1k count=$j > /dev/null 2>&1; + done; +done; + +cd .. +TEST kill_brick $V0 $H0 $B0/brick0 +cd test_hardlink_self_heal; + +RET=0 +for i in `seq 1 5`; +do + for j in `seq 1 10`; + do + ln dir.$i/file.$j dir.$i/link_file.$j > /dev/null 2>&1; + RET=$? 
+ if [ $RET -ne 0 ]; then + break; + fi + done ; + if [ $RET -ne 0 ]; then + break; + fi +done; + +cd +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +EXPECT "0" echo $RET; + +cleanup; diff --git a/tests/bugs/glusterfs/bug-873962-spb.t b/tests/bugs/glusterfs/bug-873962-spb.t new file mode 100644 index 00000000000..db84a223089 --- /dev/null +++ b/tests/bugs/glusterfs/bug-873962-spb.t @@ -0,0 +1,39 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 performance.quick-read off +TEST $CLI volume set $V0 performance.io-cache off +TEST $CLI volume set $V0 performance.write-behind off +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume set $V0 performance.read-ahead off +TEST $CLI volume set $V0 cluster.background-self-heal-count 0 +TEST $CLI volume start $V0 +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable +touch $M0/a + +exec 5<$M0/a + +kill_brick $V0 $H0 $B0/${V0}0 +echo "hi" > $M0/a +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 + +kill_brick $V0 $H0 $B0/${V0}1 +echo "bye" > $M0/a +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 + +TEST ! cat $M0/a #To mark split-brain + +TEST ! read -u 5 line +exec 5<&- + +cleanup; diff --git a/tests/bugs/glusterfs/bug-873962.t b/tests/bugs/glusterfs/bug-873962.t new file mode 100755 index 00000000000..492d0285497 --- /dev/null +++ b/tests/bugs/glusterfs/bug-873962.t @@ -0,0 +1,107 @@ +#!/bin/bash + +#AFR TEST-IDENTIFIER SPLIT-BRAIN +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +B0_hiphenated=`echo $B0 | tr '/' '-'` +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2} + +# If we allow self-heal to happen in the background, we'll get spurious +# failures - especially at the point labeled "FAIL HERE" but +# occasionally elsewhere. This behavior is very timing-dependent. It +# doesn't show up in Jenkins, but it does on JD's and KP's machines, and +# it got sharply worse because of an unrelated fsync change (6ae6f3d) +# which changed timing. Putting anything at the FAIL HERE marker tends +# to make it go away most of the time on affected machines, even if the +# "anything" is unrelated. +# +# What's going on is that the I/O on the first mountpoint is allowed to +# complete even though self-heal is still in progress and the state on +# disk does not reflect its result. In fact, the state changes during +# self-heal create the appearance of split brain when the second I/O +# comes in, so that fails even though we haven't actually been in split +# brain since the manual xattr operations. By disallowing background +# self-heal, we ensure that the second I/O can't happen before self-heal +# is complete, because it has to follow the first I/O which now has to +# follow self-heal. 
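To make the "self-heal is still in progress" condition above concrete: a brick copy that still needs healing carries non-zero trusted.afr.* changelog xattrs, and they only return to all-zero once the heal has completed. A small sketch of how that can be checked straight from a brick backend (illustrative only, not used by this test; the helper name is made up, and $B0/$V0 come from the test framework):

# Illustrative only: list any non-zero AFR changelog xattrs for one copy of
# a file. No output means nothing is pending, i.e. self-heal has finished.
afr_pending_for_file () {
    local file_on_brick=$1
    getfattr -d -m 'trusted.afr.' -e hex "$file_on_brick" 2>/dev/null |
        grep '=0x' | grep -v '=0x000000000000000000000000'
}
# Example: afr_pending_for_file $B0/${V0}1/a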
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0 + +#Make sure self-heal is not triggered when the bricks are re-started +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume start $V0 +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable +TEST touch $M0/a +TEST touch $M0/b +TEST touch $M0/c +TEST touch $M0/d +echo "1" > $M0/b +echo "1" > $M0/d +TEST kill_brick $V0 $H0 $B0/${V0}2 +echo "1" > $M0/a +echo "1" > $M0/c +TEST setfattr -n trusted.mdata -v abc $M0/b +TEST setfattr -n trusted.mdata -v abc $M0/d +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 +TEST kill_brick $V0 $H0 $B0/${V0}1 +echo "2" > $M0/a +echo "2" > $M0/c +TEST setfattr -n trusted.mdata -v def $M0/b +TEST setfattr -n trusted.mdata -v def $M0/d +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 + +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M1 --direct-io-mode=enable + +#Files are in split-brain, so open should fail +TEST ! cat $M0/a; +TEST ! cat $M1/a; +TEST cat $M0/b; +TEST cat $M1/b; + +#Reset split-brain status +TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/a; +TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/b; + +#The operations should do self-heal and give correct output +EXPECT "2" cat $M0/a; +# FAIL HERE - see comment about cluster.self-heal-background-count above. +EXPECT "2" cat $M1/a; +TEST dd if=$M0/b of=/dev/null bs=1024k +EXPECT "def" getfattr -n trusted.mdata --only-values $M0/b 2>/dev/null +EXPECT "def" getfattr -n trusted.mdata --only-values $M1/b 2>/dev/null + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1 + +TEST $CLI volume set $V0 cluster.data-self-heal off +TEST $CLI volume set $V0 cluster.metadata-self-heal off + +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M1 --direct-io-mode=enable + +#Files are in split-brain, so open should fail +TEST ! cat $M0/c +TEST ! cat $M1/c +TEST cat $M0/d +TEST cat $M1/d + +TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/c +TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/d + +#The operations should NOT do self-heal but give correct output +EXPECT "2" cat $M0/c +EXPECT "2" cat $M1/c +EXPECT "1" cat $M0/d +EXPECT "1" cat $M1/d + +cleanup; diff --git a/tests/bugs/glusterfs/bug-879490.t b/tests/bugs/glusterfs/bug-879490.t new file mode 100755 index 00000000000..7cec6713654 --- /dev/null +++ b/tests/bugs/glusterfs/bug-879490.t @@ -0,0 +1,37 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +function peer_probe() +{ + $CLI peer probe a.b.c.d --xml | xmllint --format - | grep "" +} + +EXPECT " Probe returned with unknown errno 107" peer_probe + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterfs/bug-879494.t b/tests/bugs/glusterfs/bug-879494.t new file mode 100755 index 00000000000..06a5e5d876d --- /dev/null +++ b/tests/bugs/glusterfs/bug-879494.t @@ -0,0 +1,37 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +function peer_probe() +{ + $CLI peer detach a.b.c.d --xml | xmllint --format - | grep "" +} + +EXPECT " a.b.c.d is not part of cluster" peer_probe + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterfs/bug-892730.t b/tests/bugs/glusterfs/bug-892730.t new file mode 100755 index 00000000000..a76961134c5 --- /dev/null +++ b/tests/bugs/glusterfs/bug-892730.t @@ -0,0 +1,77 @@ +#!/bin/bash +# +# Bug 892730 - Verify that afr handles EIO errors from the brick properly. +# +# The associated bug describes a problem where EIO errors returned from the +# local filesystem of a brick that is part of a replica volume are exposed to +# the user. This test simulates such failures and verifies that the volume +# operates as expected. +# +######## + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST mkdir -p $B0/test{1,2} + +# The graph is a two brick replica with error-gen enabled on the second brick +# and configured to return EIO lookup errors 100% of the time. This simulates +# a brick with a crashed or shut down local filesystem. Note that the order in +# which errors occur is a factor in reproducing the original bug (error-gen +# must be enabled in the second brick for this test to be effective). + +cat > $B0/test.vol </dev/null` + if [ $? -eq 0 ] + then + let j++ + let "BRICK${j}=$i" + + fi + let i++ + done + return $j +} + +function get_cached_brick() +{ + i=1 + while [ $i -lt 3 ] + do + test=`getfattr -n trusted.glusterfs.dht.linkto -e text $B0/${V0}$BRICK$i 2>&1` + if [ $? 
-eq 1 ] + then + cached=$BRICK"$i" + i=$(( $i+3 )) + fi + let i++ + done + + return $cached +} + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 +TEST $CLI volume start $V0 + +## Mount FUSE +TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0; + +## create a linkfile on subvolume 0 +TEST touch $M0/1 +TEST mv $M0/1 $M0/2 + +file_has_linkfile 2 +has_link=$? +if [ $has_link -eq 2 ] +then + get_cached_brick + CACHED=$? + # Kill a brick process + kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}$CACHED.pid`; +fi + +## trigger a lookup +ls -l $M0/2 2>/dev/null + +## fail dd if file exists. + +dd if=/dev/zero of=$M0/2 bs=1 count=1 conv=excl 2>/dev/null +EXPECT "1" echo $? + +cleanup; diff --git a/tests/bugs/glusterfs/bug-895235.t b/tests/bugs/glusterfs/bug-895235.t new file mode 100644 index 00000000000..ac9caae9561 --- /dev/null +++ b/tests/bugs/glusterfs/bug-895235.t @@ -0,0 +1,23 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume set $V0 ensure-durability off +TEST $CLI volume set $V0 performance.write-behind off +TEST $CLI volume set $V0 cluster.eager-lock off +TEST $CLI volume start $V0 +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable + +TEST gluster volume profile $V0 start +TEST dd of=$M0/a if=/dev/zero bs=1024k count=1 oflag=append +finodelk_max_latency=$($CLI volume profile $V0 info | grep FINODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}") + +TEST [ -z $finodelk_max_latency ] + +cleanup; diff --git a/tests/bugs/glusterfs/bug-896431.t b/tests/bugs/glusterfs/bug-896431.t new file mode 100755 index 00000000000..7764a88d896 --- /dev/null +++ b/tests/bugs/glusterfs/bug-896431.t @@ -0,0 +1,124 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting cluster.subvols-per-directory as -5 +TEST ! $CLI volume set $V0 cluster.subvols-per-directory -5 +EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; +TEST ! $CLI volume set $V0 subvols-per-directory -5 +EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; + +## Setting cluster.subvols-per-directory as 0 +TEST ! $CLI volume set $V0 cluster.subvols-per-directory 0 +EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; +TEST ! $CLI volume set $V0 subvols-per-directory 0 +EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; + +## Setting cluster.subvols-per-directory as 4 (the total number of bricks) +TEST ! $CLI volume set $V0 cluster.subvols-per-directory 4 +EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; +TEST ! 
$CLI volume set $V0 subvols-per-directory 4 +EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; + +## Setting cluster.subvols-per-directory as 2 (the total number of subvolumes) +TEST $CLI volume set $V0 cluster.subvols-per-directory 2 +EXPECT '2' volinfo_field $V0 'cluster.subvols-per-directory'; + +## Setting cluster.subvols-per-directory as 1 +TEST $CLI volume set $V0 subvols-per-directory 1 +EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory'; + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; + +## Start and create a pure replicate volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 replica 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT 'Replicate' volinfo_field $V0 'Type'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting cluster.subvols-per-directory as 8 for a replicate volume +TEST ! $CLI volume set $V0 cluster.subvols-per-directory 8 +EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; +TEST ! $CLI volume set $V0 subvols-per-directory 8 +EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; + +## Setting cluster.subvols-per-directory as 1 for a replicate volume +TEST $CLI volume set $V0 cluster.subvols-per-directory 1 +EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory'; +TEST $CLI volume set $V0 subvols-per-directory 1 +EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory'; + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; + +## Start and create a pure stripe volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 stripe 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT 'Stripe' volinfo_field $V0 'Type'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Setting cluster.subvols-per-directory as 8 for a stripe volume +TEST ! $CLI volume set $V0 cluster.subvols-per-directory 8 +EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; +TEST ! $CLI volume set $V0 subvols-per-directory 8 +EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory'; + +## Setting cluster.subvols-per-directory as 1 for a stripe volume +TEST $CLI volume set $V0 cluster.subvols-per-directory 1 +EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory'; +TEST $CLI volume set $V0 subvols-per-directory 1 +EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory'; + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/glusterfs/bug-902610.t b/tests/bugs/glusterfs/bug-902610.t new file mode 100755 index 00000000000..656bf50137e --- /dev/null +++ b/tests/bugs/glusterfs/bug-902610.t @@ -0,0 +1,65 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +## Layout-spread set to 3, but subvols up are 2. 
So layout should split 50-50 +function get_layout() +{ + layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1|grep dht |cut -d = -f2` + layout1_s=$(echo $layout1 | cut -c 19-26) + layout1_e=$(echo $layout1 | cut -c 27-34) + #echo "layout1 from $layout1_s to $layout1_e" > /dev/tty + layout2=`getfattr -n trusted.glusterfs.dht -e hex $2 2>&1|grep dht |cut -d = -f2` + layout2_s=$(echo $layout2 | cut -c 19-26) + layout2_e=$(echo $layout2 | cut -c 27-34) + #echo "layout2 from $layout2_s to $layout2_e" > /dev/tty + + if [ x"$layout2_s" = x"00000000" ]; then + # Reverse so we only have the real logic in one place. + tmp_s=$layout1_s + tmp_e=$layout1_e + layout1_s=$layout2_s + layout1_e=$layout2_e + layout2_s=$tmp_s + layout2_e=$tmp_e + fi + + # Figure out where the join point is. + target=$( $PYTHON -c "print '%08x' % (0x$layout1_e + 1)") + #echo "target for layout2 = $target" > /dev/tty + + # The second layout should cover everything that the first doesn't. + if [ x"$layout2_s" = x"$target" -a x"$layout2_e" = x"ffffffff" ]; then + return 0 + fi + + return 1 +} + +BRICK_COUNT=4 + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3 +## set subvols-per-dir option +TEST $CLI volume set $V0 subvols-per-directory 3 +TEST $CLI volume start $V0 + +## Mount FUSE +TEST glusterfs -s $H0 --volfile-id $V0 $M0 --entry-timeout=0 --attribute-timeout=0; + +TEST ls -l $M0 + +## kill 2 bricks to bring down available subvol < spread count +kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}2.pid`; +kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}3.pid`; + +mkdir $M0/dir1 2>/dev/null + +get_layout $B0/${V0}0/dir1 $B0/${V0}1/dir1 +EXPECT "0" echo $? + +cleanup; diff --git a/tests/bugs/glusterfs/bug-906646.t b/tests/bugs/glusterfs/bug-906646.t new file mode 100644 index 00000000000..45c85d9f67c --- /dev/null +++ b/tests/bugs/glusterfs/bug-906646.t @@ -0,0 +1,93 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +REPLICA=2 + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 $H0:$B0/${V0}-10 $H0:$B0/${V0}-11 +TEST $CLI volume start $V0 + +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 cluster.background-self-heal-count 0 + +## Mount FUSE with caching disabled +TEST $GFS -s $H0 --volfile-id $V0 $M0; + +function xattr_query_check() +{ + local path=$1 + local xa_name=$2 + + local ret=$(getfattr -n $xa_name $path 2>&1 | grep -o "$xa_name: No such attribute" | wc -l) + echo $ret +} + +function set_xattr() +{ + local path=$1 + local xa_name=$2 + local xa_val=$3 + + setfattr -n $xa_name -v $xa_val $path + echo $? +} + +function remove_xattr() +{ + local path=$1 + local xa_name=$2 + + setfattr -x $xa_name $path + echo $? 
+} + +f=f00f +pth=$M0/$f + +TEST touch $pth + +# fetch backend paths +backend_paths=`get_backend_paths $pth` + +# convert it into and array +backend_paths_array=($backend_paths) + +# setxattr xattr for this file +EXPECT 0 set_xattr $pth "trusted.name" "test" + +# confirm the set on backend +EXPECT 0 xattr_query_check ${backend_paths_array[0]} "trusted.name" +EXPECT 0 xattr_query_check ${backend_paths_array[1]} "trusted.name" + +brick_path=`echo ${backend_paths_array[0]} | sed -n 's/\(.*\)\/'$f'/\1/p'` +brick_id=`$CLI volume info $V0 | grep "Brick[[:digit:]]" | grep -n $brick_path | cut -f1 -d:` + +# Kill a brick process +TEST kill_brick $V0 $H0 $brick_path + +# remove the xattr from the mount point +EXPECT 0 remove_xattr $pth "trusted.name" + +# we killed ${backend_paths[0]} - so expect the xattr to be there +# on the backend there +EXPECT 0 xattr_query_check ${backend_paths_array[0]} "trusted.name" +EXPECT 1 xattr_query_check ${backend_paths_array[1]} "trusted.name" + +# restart the brick process +TEST $CLI volume start $V0 force + +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 `expr $brick_id - 1` + +cat $pth >/dev/null + +# check backends - xattr should not be present anywhere +EXPECT 1 xattr_query_check ${backend_paths_array[0]} "trusted.name" +EXPECT 1 xattr_query_check ${backend_paths_array[1]} "trusted.name" + +cleanup; diff --git a/tests/bugs/glusterfs/getlk_owner.c b/tests/bugs/glusterfs/getlk_owner.c new file mode 100644 index 00000000000..85fd1042496 --- /dev/null +++ b/tests/bugs/glusterfs/getlk_owner.c @@ -0,0 +1,120 @@ +#include +#include +#include +#include + +#define GETLK_OWNER_CHECK(f, cp, label) \ + do { \ + switch (f.l_type) { \ + case F_RDLCK: \ + case F_WRLCK: \ + ret = 1; \ + goto label; \ + case F_UNLCK: \ + if (!are_flocks_sane (&f, &cp)) { \ + ret = 1; \ + goto label; \ + } \ + break; \ + } \ + } while (0) + +void +flock_init (struct flock *f, short int type, off_t start, off_t len) +{ + f->l_type = type; + f->l_start = start; + f->l_len = len; +} + +int +flock_cp (struct flock *dst, struct flock *src) +{ + memcpy ((void *) dst, (void *) src, sizeof (struct flock)); +} + +int +are_flocks_sane (struct flock *src, struct flock *cpy) +{ + return ((src->l_whence == cpy->l_whence) && + (src->l_start == cpy->l_start) && + (src->l_len == cpy->l_len)); +} + +/* + * Test description: + * SETLK (0,3), F_WRLCK + * SETLK (3,3), F_WRLCK + * + * the following GETLK requests must return flock struct unmodified + * except for l_type to F_UNLCK + * GETLK (3,3), F_WRLCK + * GETLK (3,3), F_RDLCK + * + * */ + +int main (int argc, char **argv) +{ + int fd = -1; + int ret = 1; + char *fname = NULL; + struct flock f = {0,}; + struct flock cp = {0,}; + + if (argc < 2) + goto out; + + fname = argv[1]; + fd = open (fname, O_RDWR); + if (fd == -1) { + perror ("open"); + goto out; + } + + flock_init (&f, F_WRLCK, 0, 3); + flock_cp (&cp, &f); + ret = fcntl (fd, F_SETLK, &f); + if (ret) { + perror ("fcntl"); + goto out; + } + if (!are_flocks_sane (&f, &cp)) { + ret = 1; + goto out; + } + + flock_init (&f, F_WRLCK, 3, 3); + flock_cp (&cp, &f); + ret = fcntl (fd, F_SETLK, &f); + if (ret) { + perror ("fcntl"); + goto out; + } + if (!are_flocks_sane (&f, &cp)) { + ret = 1; + goto out; + } + + flock_init (&f, F_WRLCK, 3, 3); + flock_cp (&cp, &f); + ret = fcntl (fd, F_GETLK, &f); + if (ret) { + perror ("fcntl"); + return 1; + } + GETLK_OWNER_CHECK (f, cp, out); + + flock_init (&f, F_RDLCK, 3, 3); + flock_cp (&cp, &f); + ret = fcntl (fd, F_GETLK, &f); + if (ret) { + perror ("fcntl"); + 
return 1; + } + GETLK_OWNER_CHECK (f, cp, out); + +out: + if (fd != -1) + close (fd); + return ret; +} diff --git a/tests/bugs/io-cache/bug-858242.c b/tests/bugs/io-cache/bug-858242.c new file mode 100644 index 00000000000..ecdda2a5d23 --- /dev/null +++ b/tests/bugs/io-cache/bug-858242.c @@ -0,0 +1,81 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef linux +#define fstat64(fd, st) fstat(fd, st) +#endif + +int +main (int argc, char *argv[]) +{ + char *filename = NULL, *volname = NULL, *cmd = NULL; + char buffer[1024] = {0, }; + int fd = -1; + int ret = -1; + struct stat statbuf = {0, }; + + if (argc != 3) { + fprintf (stderr, "usage: %s \n", argv[0]); + goto out; + } + + filename = argv[1]; + volname = argv[2]; + + fd = open (filename, O_RDWR | O_CREAT, 0); + if (fd < 0) { + fprintf (stderr, "open (%s) failed (%s)\n", filename, + strerror (errno)); + goto out; + } + + ret = write (fd, "test-content", 12); + if (ret < 0) { + fprintf (stderr, "write failed (%s)", strerror (errno)); + goto out; + } + + ret = fsync (fd); + if (ret < 0) { + fprintf (stderr, "fsync failed (%s)", strerror (errno)); + goto out; + } + + ret = fstat64 (fd, &statbuf); + if (ret < 0) { + fprintf (stderr, "fstat64 failed (%s)", strerror (errno)); + goto out; + } + + ret = asprintf (&cmd, "gluster --mode=script volume stop %s force", + volname); + if (ret < 0) { + fprintf (stderr, "cannot construct cli command string (%s)", + strerror (errno)); + goto out; + } + + ret = system (cmd); + if (ret < 0) { + fprintf (stderr, "stopping volume (%s) failed", volname); + goto out; + } + + ret = read (fd, buffer, 1024); + if (ret >= 0) { + fprintf (stderr, "read should've returned error, " + "but is successful\n"); + ret = -1; + goto out; + } + + ret = 0; +out: + return ret; +} diff --git a/tests/bugs/io-cache/bug-858242.t b/tests/bugs/io-cache/bug-858242.t new file mode 100755 index 00000000000..0c8ffb6ab30 --- /dev/null +++ b/tests/bugs/io-cache/bug-858242.t @@ -0,0 +1,28 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/brick1; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST $CLI volume set $V0 performance.quick-read off + +#mount on a random dir +TEST glusterfs --entry-timeout=3600 --attribute-timeout=3600 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=yes + +build_tester $(dirname $0)/bug-858242.c + +TEST $(dirname $0)/bug-858242 $M0/testfile $V0 + +TEST rm -rf $(dirname $0)/858242 +cleanup; + diff --git a/tests/bugs/libgfapi/bug-1032894.t b/tests/bugs/libgfapi/bug-1032894.t new file mode 100644 index 00000000000..88d110136e2 --- /dev/null +++ b/tests/bugs/libgfapi/bug-1032894.t @@ -0,0 +1,33 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +#Check stale indices are deleted as part of self-heal-daemon crawl. 
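# A rough sketch, for reference, of what the afr_get_index_count check at the
# end of this test is assumed to do: count the pending-heal (xattrop index)
# entries directly on a brick backend. The helper name and the exact filter
# below are illustrative assumptions, not the volume.rc implementation.
function count_xattrop_entries ()
{
        local brick=$1
        # pending entries are gfid-named links under the index xlator's
        # xattrop directory; the persistent "xattrop-*" base file itself is
        # not a pending entry, so it is filtered out
        ls "$brick/.glusterfs/indices/xattrop" 2>/dev/null | grep -v '^xattrop' | wc -l
}
# Example: count_xattrop_entries $B0/${V0}1 would be expected to drop to 0
# once the self-heal daemon has crawled and removed the stale indices that
# the steps below create.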
+cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 +cd $M0 +TEST mkdir a +cd a +TEST kill_brick $V0 $H0 $B0/${V0}0 +# Create stale indices +for i in {1..10}; do echo abc > $i; done +for i in {1..10}; do rm -f $i; done + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +TEST $CLI volume set $V0 cluster.self-heal-daemon on +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status + +#Since maximum depth of the directory structure that needs healin is 2 +#Trigger two self-heals. That should make sure the heal is complete +TEST $CLI volume heal $V0 + +EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_index_count $B0/${V0}1 +cleanup diff --git a/tests/bugs/logging/bug-823081.t b/tests/bugs/logging/bug-823081.t new file mode 100755 index 00000000000..0ed8f4c26c1 --- /dev/null +++ b/tests/bugs/logging/bug-823081.t @@ -0,0 +1,41 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; +cmd_log_history="cmd_history.log" +V1=patchy2 + +TEST glusterd +TEST pidof glusterd + +logdir=`gluster --print-logdir` +function set_tail () +{ + vol=$1; + tail_success="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 : SUCCESS" + tail_failure="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 : FAILED : Volume $vol already exists" + tail_success_force="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 force : SUCCESS" + tail_failure_force="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 force : FAILED : Volume $vol already exists" +} + +set_tail $V0; + +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; +tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-` +TEST [[ \"$tail\" == \"$tail_success\" ]] + +TEST ! $CLI volume create $V0 $H0:$B0/${V0}{1,2}; +tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-` +TEST [[ \"$tail\" == \"$tail_failure\" ]] + +set_tail $V1; +TEST gluster volume create $V1 $H0:$B0/${V1}{1,2} force; +tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-` +TEST [[ \"$tail\" == \"$tail_success_force\" ]] + +TEST ! gluster volume create $V1 $H0:$B0/${V1}{1,2} force; +tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-` +TEST [[ \"$tail\" == \"$tail_failure_force\" ]] + +cleanup; diff --git a/tests/bugs/nfs/bug-1053579.t b/tests/bugs/nfs/bug-1053579.t new file mode 100755 index 00000000000..81e786d2ec6 --- /dev/null +++ b/tests/bugs/nfs/bug-1053579.t @@ -0,0 +1,111 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. 
$(dirname $0)/../../nfs.rc + +cleanup + +# prepare the users and groups +NEW_USER=bug1053579 +NEW_UID=1053579 +NEW_GID=1053579 +LAST_GID=1053779 +NEW_GIDS=${NEW_GID} + +# OS-specific overrides +case $OSTYPE in +NetBSD|Darwin) + # only NGROUPS_MAX=16 secondary groups are supported + LAST_GID=1053593 + ;; +FreeBSD) + # NGROUPS_MAX=1023 (FreeBSD>=8.0), we can afford 200 groups + ;; +Linux) + # NGROUPS_MAX=65536, we can afford 200 groups + ;; +*) + ;; +esac + +# create a user that belongs to many groups +for GID in $(seq -f '%6.0f' ${NEW_GID} ${LAST_GID}) +do + groupadd -o -g ${GID} ${NEW_USER}-${GID} + NEW_GIDS="${NEW_GIDS},${NEW_USER}-${GID}" +done +TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -G ${NEW_USER}-${NEW_GIDS} ${NEW_USER} + +# preparation done, start the tests + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/${V0}1 +TEST $CLI volume set $V0 nfs.server-aux-gids on +TEST $CLI volume start $V0 + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available + +# mount the volume +TEST mount_nfs $H0:/$V0 $N0 nolock +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 + +# the actual test, this used to crash +su -m ${NEW_USER} -c "stat $N0/. > /dev/null" +TEST [ $? -eq 0 ] + +# create a file that only a user in a high-group can access +echo 'Hello World!' > $N0/README +chgrp ${LAST_GID} $N0/README +chmod 0640 $N0/README + +#su -m ${NEW_USER} -c "cat $N0/README 2>&1 > /dev/null" +su -m ${NEW_USER} -c "cat $N0/README" +ret=$? + +case $OSTYPE in +Linux) # Linux NFS fails with big GID + if [ $ret -ne 0 ] ; then + res="Y" + else + res="N" + fi + ;; +*) # Other systems should cope better + if [ $ret -eq 0 ] ; then + res="Y" + else + res="N" + fi + ;; +esac +TEST [ "x$res" = "xY" ] + +# This passes only on build.gluster.org, not reproducible on other machines?! +#su -m ${NEW_USER} -c "cat $M0/README 2>&1 > /dev/null" +#TEST [ $? -ne 0 ] + +# enable server.manage-gids and things should work +TEST $CLI volume set $V0 server.manage-gids on + +su -m ${NEW_USER} -c "cat $N0/README 2>&1 > /dev/null" +TEST [ $? -eq 0 ] +su -m ${NEW_USER} -c "cat $M0/README 2>&1 > /dev/null" +TEST [ $? -eq 0 ] + +# cleanup +userdel --force ${NEW_USER} +for GID in $(seq -f '%6.0f' ${NEW_GID} ${LAST_GID}) +do + groupdel ${NEW_USER}-${GID} +done + +rm -f $N0/README +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup diff --git a/tests/bugs/nfs/bug-1116503.t b/tests/bugs/nfs/bug-1116503.t new file mode 100644 index 00000000000..5527f768b45 --- /dev/null +++ b/tests/bugs/nfs/bug-1116503.t @@ -0,0 +1,45 @@ +#!/bin/bash +# +# Verify that mounting NFS over UDP (MOUNT service only) works. +# + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. 
$(dirname $0)/../../nfs.rc + + +cleanup; +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/$V0 +TEST $CLI volume set $V0 nfs.mount-udp on + +TEST $CLI volume start $V0 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; + +TEST mount_nfs $H0:/$V0 $N0 nolock,mountproto=udp,proto=tcp; +TEST mkdir -p $N0/foo/bar +TEST ls $N0/foo +TEST ls $N0/foo/bar +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0/foo $N0 nolock,mountproto=udp,proto=tcp; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0/foo/bar $N0 nolock,mountproto=udp,proto=tcp; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +TEST $CLI volume set $V0 nfs.addr-namelookup on +TEST $CLI volume set $V0 nfs.rpc-auth-allow $H0 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0/foo/bar $N0 nolock,mountproto=udp,proto=tcp; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +TEST $CLI volume set $V0 nfs.rpc-auth-reject $H0 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST ! mount_nfs $H0:/$V0/foo/bar $N0 nolock,mountproto=udp,proto=tcp; + +cleanup; diff --git a/tests/bugs/nfs/bug-1157223-symlink-mounting.t b/tests/bugs/nfs/bug-1157223-symlink-mounting.t new file mode 100644 index 00000000000..469b221f5b3 --- /dev/null +++ b/tests/bugs/nfs/bug-1157223-symlink-mounting.t @@ -0,0 +1,124 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc + +cleanup; + +## Start and create a volume +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume info; +TEST $CLI volume create $V0 $H0:$B0/$V0 + +TEST $CLI volume start $V0; + +## Wait for volume to register with rpc.mountd +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; + +## Mount NFS +TEST mount_nfs $H0:/$V0 $N0 nolock; + +mkdir $N0/dir1; +mkdir $N0/dir2; +pushd $N0/ ; + +##link created using relative path +ln -s dir1 symlink1; + +##relative path contains ".." +ln -s ../dir1 dir2/symlink2; + +##link created using absolute path +ln -s $N0/dir1 symlink3; + +##link pointing to another symlinks +ln -s symlink1 symlink4 +ln -s symlink3 symlink5 + +##dead links +ln -s does/not/exist symlink6 + +##link which contains ".." points out of glusterfs +ln -s ../../ symlink7 + +##links pointing to unauthorized area +ln -s .glusterfs symlink8 + +popd ; + +##Umount the volume +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +## Mount and umount NFS via directory +TEST mount_nfs $H0:/$V0/dir1 $N0 nolock; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +## Mount and umount NFS via symlink1 +TEST mount_nfs $H0:/$V0/symlink1 $N0 nolock; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +## Mount and umount NFS via symlink2 +TEST mount_nfs $H0:/$V0/dir2/symlink2 $N0 nolock; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +## Mount NFS via symlink3 should fail +TEST ! mount_nfs $H0:/$V0/symlink3 $N0 nolock; + +## Mount and umount NFS via symlink4 +TEST mount_nfs $H0:/$V0/symlink4 $N0 nolock; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +## Mount NFS via symlink5 should fail +TEST ! mount_nfs $H0:/$V0/symlink5 $N0 nolock; + +## Mount NFS via symlink6 should fail +TEST ! mount_nfs $H0:/$V0/symlink6 $N0 nolock; + +## Mount NFS via symlink7 should fail +TEST ! mount_nfs $H0:/$V0/symlink7 $N0 nolock; + +## Mount NFS via symlink8 should fail +TEST ! 
mount_nfs $H0:/$V0/symlink8 $N0 nolock; + +##Similar check for udp mount +$CLI volume stop $V0 +TEST $CLI volume set $V0 nfs.mount-udp on +$CLI volume start $V0 + +## Wait for volume to register with rpc.mountd +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; + +## Mount and umount NFS via directory +TEST mount_nfs $H0:/$V0/dir1 $N0 nolock,mountproto=udp,proto=tcp; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +## Mount and umount NFS via symlink1 +TEST mount_nfs $H0:/$V0/symlink1 $N0 nolock,mountproto=udp,proto=tcp; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +## Mount and umount NFS via symlink2 +TEST mount_nfs $H0:/$V0/dir2/symlink2 $N0 nolock,mountproto=udp,proto=tcp; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +## Mount NFS via symlink3 should fail +TEST ! mount_nfs $H0:/$V0/symlink3 $N0 nolock,mountproto=udp,proto=tcp; + +## Mount and umount NFS via symlink4 +TEST mount_nfs $H0:/$V0/symlink4 $N0 nolock,mountproto=udp,proto=tcp; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 + +## Mount NFS via symlink5 should fail +TEST ! mount_nfs $H0:/$V0/symlink5 $N0 nolock,mountproto=udp,proto=tcp; + +## Mount NFS via symlink6 should fail +TEST ! mount_nfs $H0:/$V0/symlink6 $N0 nolock,mountproto=udp,proto=tcp; + +##symlink7 is not check here, because in udp mount ../../ resolves into root '/' + +## Mount NFS via symlink8 should fail +TEST ! mount_nfs $H0:/$V0/symlink8 $N0 nolock,mountproto=udp,proto=tcp; + +rm -rf $H0:$B0/ +cleanup; diff --git a/tests/bugs/nfs/bug-1161092-nfs-acls.t b/tests/bugs/nfs/bug-1161092-nfs-acls.t new file mode 100644 index 00000000000..ed7761db3e3 --- /dev/null +++ b/tests/bugs/nfs/bug-1161092-nfs-acls.t @@ -0,0 +1,36 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info + +TEST $CLI volume create $V0 $H0:$B0/brick1; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available +TEST mount_nfs $H0:/$V0 $N0 + +TEST touch $N0/file1 +TEST chmod 700 $N0/file1 +TEST getfacl $N0/file1 + +TEST $CLI volume set $V0 root-squash on +TEST getfacl $N0/file1 + +TEST umount_nfs $H0:/$V0 $N0 +TEST mount_nfs $H0:/$V0 $N0 +TEST getfacl $N0/file1 + +## Before killing daemon to avoid deadlocks +umount_nfs $N0 + +cleanup; + diff --git a/tests/bugs/nfs/bug-847622.t b/tests/bugs/nfs/bug-847622.t new file mode 100755 index 00000000000..22167e87e50 --- /dev/null +++ b/tests/bugs/nfs/bug-847622.t @@ -0,0 +1,36 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc +. $(dirname $0)/../../volume.rc + +case $OSTYPE in +NetBSD) + echo "Skip test on ACL which are not available on NetBSD" >&2 + SKIP_TESTS + exit 0 + ;; +*) + ;; +esac + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 $H0:$B0/brick0 +TEST $CLI volume start $V0 + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 nolock +cd $N0 + +# simple getfacl setfacl commands +TEST touch testfile +TEST setfacl -m u:14:r testfile +TEST getfacl testfile + +cd +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 +cleanup + diff --git a/tests/bugs/nfs/bug-877885.t b/tests/bugs/nfs/bug-877885.t new file mode 100755 index 00000000000..47eb396b532 --- /dev/null +++ b/tests/bugs/nfs/bug-877885.t @@ -0,0 +1,36 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1 +TEST $CLI volume start $V0 + +## Mount FUSE with caching disabled +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \ +$M0; + +TEST touch $M0/file +TEST mkdir $M0/dir + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 nolock +cd $N0 + +rm -rf * & + +TEST mount_nfs $H0:/$V0 $N1 retry=0,nolock; + +cd; + +kill %1; + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N1 + +cleanup diff --git a/tests/bugs/nfs/bug-904065.t b/tests/bugs/nfs/bug-904065.t new file mode 100755 index 00000000000..ff6797bcbec --- /dev/null +++ b/tests/bugs/nfs/bug-904065.t @@ -0,0 +1,91 @@ +#!/bin/bash +# +# This test does not use 'showmount' from the nfs-utils package, it would +# require setting up a portmapper (either rpcbind or portmap, depending on the +# Linux distribution used for testing). The persistancy of the rmtab should not +# affect the current showmount outputs, so existing regression tests should be +# sufficient. +# + +# count the lines of a file, return 0 if the file does not exist +function count_lines() +{ + if [ -e "$1" ] + then + wc -l < $1 + else + echo 0 + fi +} + + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc +. $(dirname $0)/../../volume.rc + +cleanup + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/brick1 +EXPECT 'Created' volinfo_field $V0 'Status' + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status' + +# glusterfs/nfs needs some time to start up in the background +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available + +# before mounting the rmtab should be empty +EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab + +TEST mount_nfs $H0:/$V0 $N0 nolock +# the output would looks similar to: +# +# hostname-0=172.31.122.104 +# mountpoint-0=/ufo +# +EXPECT '2' count_lines $GLUSTERD_WORKDIR/nfs/rmtab + +# duplicate mounts should not be recorded (client could have crashed) +TEST mount_nfs $H0:/$V0 $N1 nolock +EXPECT '2' count_lines $GLUSTERD_WORKDIR/nfs/rmtab + +# removing a mount should (even if there are two) should remove the entry +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N1 +EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab + +# unmounting the other mount should work flawlessly +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 +EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab + +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $M0 + +# we'll create a fake rmtab here, similar to how an other storage server would do +# using an invalid IP address to prevent (unlikely) collisions on the test-machine +cat << EOF > $M0/rmtab +hostname-0=127.0.0.256 +mountpoint-0=/ufo +EOF +EXPECT '2' count_lines $M0/rmtab + +# reconfigure merges the rmtab with the one on the volume +TEST gluster volume set $V0 nfs.mount-rmtab $M0/rmtab + +# glusterfs/nfs needs some time to restart +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available + +# a new mount should be added to the rmtab, not overwrite exiting ones +TEST mount_nfs $H0:/$V0 $N0 nolock +EXPECT '4' count_lines $M0/rmtab + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 +EXPECT '2' count_lines $M0/rmtab + +# TODO: nfs/reconfigure() is never called and is therefor disabled. 
When the +# NFS-server supports reloading and does not get restarted anymore, we should +# add a test that includes the merging of entries in the old rmtab with the new +# rmtab. + +cleanup diff --git a/tests/bugs/nfs/bug-915280.t b/tests/bugs/nfs/bug-915280.t new file mode 100755 index 00000000000..72bdf2c0d9c --- /dev/null +++ b/tests/bugs/nfs/bug-915280.t @@ -0,0 +1,51 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../nfs.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + +TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +MOUNTDIR=$N0; +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 nolock,timeo=30,retrans=1 +TEST touch $N0/testfile + +TEST $CLI volume set $V0 debug.error-gen client +TEST $CLI volume set $V0 debug.error-fops stat +TEST $CLI volume set $V0 debug.error-failure 100 + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; + +pid_file=$(read_nfs_pidfile); + +getfacl $N0/testfile 2>/dev/null + +nfs_pid=$(get_nfs_pid); +if [ ! $nfs_pid ] +then + nfs_pid=0; +fi + +TEST [ $nfs_pid -eq $pid_file ] + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR + +cleanup; diff --git a/tests/bugs/nfs/bug-970070.t b/tests/bugs/nfs/bug-970070.t new file mode 100755 index 00000000000..61be4844e51 --- /dev/null +++ b/tests/bugs/nfs/bug-970070.t @@ -0,0 +1,13 @@ +#!/bin/bash +# TEST the nfs.acl option +. $(dirname $0)/../../include.rc + +cleanup +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/$V0 +TEST $CLI volume start $V0 +TEST $CLI volume set $V0 nfs.acl off +TEST $CLI volume set $V0 nfs.acl on +cleanup diff --git a/tests/bugs/nfs/bug-974972.t b/tests/bugs/nfs/bug-974972.t new file mode 100755 index 00000000000..9ed19915a67 --- /dev/null +++ b/tests/bugs/nfs/bug-974972.t @@ -0,0 +1,37 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../nfs.rc + +#This script checks that nfs mount does not fail lookup on files with split-brain +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume set $V0 self-heal-daemon off +TEST $CLI volume start $V0 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 +TEST touch $N0/1 +TEST kill_brick ${V0} ${H0} ${B0}/${V0}1 +echo abc > $N0/1 +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 1 + +TEST kill_brick ${V0} ${H0} ${B0}/${V0}0 +echo def > $N0/1 +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 1 + +#Lookup should not fail +TEST ls $N0/1 +TEST ! 
cat $N0/1 + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 +cleanup diff --git a/tests/bugs/overlap.py b/tests/bugs/overlap.py deleted file mode 100755 index 15f2da473f1..00000000000 --- a/tests/bugs/overlap.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/python - -import sys - -def calculate_one (ov, nv): - old_start = int(ov[18:26],16) - old_end = int(ov[26:34],16) - new_start = int(nv[18:26],16) - new_end = int(nv[26:34],16) - if (new_end < old_start) or (new_start > old_end): - #print '%s, %s -> ZERO' % (ov, nv) - return 0 - all_start = max(old_start,new_start) - all_end = min(old_end,new_end) - #print '%s, %s -> %08x' % (ov, nv, all_end - all_start + 1) - return all_end - all_start + 1 - -def calculate_all (values): - total = 0 - nv_index = len(values) / 2 - for old_val in values[:nv_index]: - new_val = values[nv_index] - nv_index += 1 - total += calculate_one(old_val,new_val) - return total - -""" -test1_vals = [ - '0x0000000000000000000000003fffffff', # first quarter - '0x0000000000000000400000007fffffff', # second quarter - '0x000000000000000080000000ffffffff', # second half - '0x00000000000000000000000055555554', # first third - '0x000000000000000055555555aaaaaaa9', # second third - '0x0000000000000000aaaaaaaaffffffff', # last third -] - -test2_vals = [ - '0x0000000000000000000000003fffffff', # first quarter - '0x0000000000000000400000007fffffff', # second quarter - '0x000000000000000080000000ffffffff', # second half - '0x00000000000000000000000055555554', # first third - # Next two are (incorrectly) swapped. - '0x0000000000000000aaaaaaaaffffffff', # last third - '0x000000000000000055555555aaaaaaa9', # second third -] - -print '%08x' % calculate_one(test1_vals[0],test1_vals[3]) -print '%08x' % calculate_one(test1_vals[1],test1_vals[4]) -print '%08x' % calculate_one(test1_vals[2],test1_vals[5]) -print '= %08x' % calculate_all(test1_vals) -print '%08x' % calculate_one(test2_vals[0],test2_vals[3]) -print '%08x' % calculate_one(test2_vals[1],test2_vals[4]) -print '%08x' % calculate_one(test2_vals[2],test2_vals[5]) -print '= %08x' % calculate_all(test2_vals) -""" - -if __name__ == '__main__': - # Return decimal so bash can reason about it. - print '%d' % calculate_all(sys.argv[1:]) diff --git a/tests/bugs/posix/bug-1034716.t b/tests/bugs/posix/bug-1034716.t new file mode 100644 index 00000000000..d36f8b598f4 --- /dev/null +++ b/tests/bugs/posix/bug-1034716.t @@ -0,0 +1,60 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +#Basic checks +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info + +#Create a distributed volume +TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}; +TEST $CLI volume start $V0 + +# Mount FUSE +TEST glusterfs -s $H0 --volfile-id $V0 $M0 + +#Create a file and perform fop on a DIR +TEST touch $M0/foo + +function xattr_query_check() { + local path=$1 + + local ret=`getfattr -m . -d $path 2>&1 | grep -c 'trusted.glusterfs'` + echo $ret +} + +function set_xattr() { + local path=$1 + local xa_name=$2 + local xa_val=$3 + + setfattr -n $xa_name -v $xa_val $path + echo $? +} + +function remove_xattr() { + local path=$1 + local xa_name=$2 + + setfattr -x $xa_name $path + echo $? +} + +EXPECT 0 xattr_query_check $M0/ +EXPECT 0 xattr_query_check $M0/foo + +EXPECT 1 set_xattr $M0/ 'trusted.glusterfs.volume-id' 'foo' +EXPECT 1 remove_xattr $M0/ 'trusted.glusterfs.volume-id' + + +## Finish up +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0; +TEST ! 
$CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t b/tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t new file mode 100755 index 00000000000..e67616db618 --- /dev/null +++ b/tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t @@ -0,0 +1,54 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +function get_uid() { + stat -c '%u' $1; +} + +function get_gid() { + stat -c '%g' $1; +} + + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT '8' brick_count $V0 + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST glusterfs -s $H0 --volfile-id $V0 $M0; + +EXPECT 0 get_uid $M0; +EXPECT 0 get_gid $M0; + +TEST chown 100:101 $M0; + +EXPECT 100 get_uid $M0; +EXPECT 101 get_gid $M0; + +TEST $CLI volume stop $V0; +TEST $CLI volume start $V0; + +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 6 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 7 + +EXPECT 100 get_uid $M0; +EXPECT 101 get_gid $M0; + +cleanup; diff --git a/tests/bugs/posix/bug-765380.t b/tests/bugs/posix/bug-765380.t new file mode 100644 index 00000000000..384b8022a42 --- /dev/null +++ b/tests/bugs/posix/bug-765380.t @@ -0,0 +1,39 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +REPLICA=2 + +TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}00 $H0:$B0/${V0}01 $H0:$B0/${V0}10 $H0:$B0/${V0}11 +TEST $CLI volume start $V0 + +## Mount FUSE with caching disabled +TEST $GFS -s $H0 --volfile-id $V0 $M0; + +function count_hostname_or_uuid_from_pathinfo() +{ + pathinfo=$(getfattr -n trusted.glusterfs.pathinfo $M0/f00f) + echo $pathinfo | grep -o $1 | wc -l +} + +TEST touch $M0/f00f + +EXPECT $REPLICA count_hostname_or_uuid_from_pathinfo $H0 + +# turn on node-uuid-pathinfo option +TEST $CLI volume set $V0 node-uuid-pathinfo on + +# do not expext hostname as part of the pathinfo string +EXPECT 0 count_hostname_or_uuid_from_pathinfo $H0 + +uuid=$(grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=) + +# ... but expect the uuid $REPLICA times +EXPECT $REPLICA count_hostname_or_uuid_from_pathinfo $uuid + +cleanup; diff --git a/tests/bugs/posix/bug-990028.t b/tests/bugs/posix/bug-990028.t new file mode 100755 index 00000000000..a35ea2a9f6e --- /dev/null +++ b/tests/bugs/posix/bug-990028.t @@ -0,0 +1,155 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../fileio.rc + +cleanup; + +TESTS_EXPECTED_IN_LOOP=153 + +function __init() +{ + TEST glusterd + TEST pidof glusterd + TEST $CLI volume info; + + TEST $CLI volume create $V0 $H0:$B0/brick + + EXPECT 'Created' volinfo_field $V0 'Status'; + + TEST $CLI volume start $V0 + + TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 + + TEST $CLI volume quota $V0 enable +} + +#CASE-1 +#checking pgfid under same directory +function links_in_same_directory() +{ + # create a file file1 + TEST touch $M0/file1 + + # create 50 hardlinks for file1 + for i in `seq 2 50`; do + TEST_IN_LOOP ln $M0/file1 $M0/file$i + done + + # store the pgfid of file1 in PGFID_FILE1 [should be 50 now (0x000000032)] + PGFID_FILE1=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/file1 2>&1 | grep "trusted.pgfid" | gawk -F '=' '{print $2}'` + + # compare the pgfid(link value ) of each hard links are equal or not + for i in `seq 2 50`; do + TEMP=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/file$i 2>&1 | grep "trusted.pgfid" | gawk -F '=' '{print $2}'` + TEST_IN_LOOP [ $PGFID_FILE1 = $TEMP ] + done + + # check if no of links value is 50 or not + TEST [ $PGFID_FILE1 = "0x00000032" ] + + # unlink file 2 to 50 + for i in `seq 2 50`; do + TEST_IN_LOOP unlink $M0/file$i; + done + + # now check if pgfid value is 1 or not + PGFID_FILE1=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/file1 2>&1 | grep "trusted.pgfid" | gawk -F '=' '{print $2}'`; + + TEST [ $PGFID_FILE1 = "0x00000001" ] + + TEST rm -f $M0/* +} + +##checking pgfid under diff directories +function links_across_directories() +{ + TEST mkdir $M0/dir1 $M0/dir2; + + # create a file in dir1 + TEST touch $M0/dir1/file1; + + # create hard link for file1 in dir2 + TEST ln $M0/dir1/file1 $M0/dir2/file2; + + #first check is to find whether there are two pgfids or not + LINES=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir1/file1 2>&1 | grep "trusted.pgfid" | wc -l` + TEST [ $LINES = 2 ] + + for i in $(seq 1 2); do + HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir$i/file$i 2>&1 | grep "trusted.pgfid" | cut -d$'\n' -f$i | cut -d'=' -f2` + TEST_IN_LOOP [ $HL = "0x00000001" ] + done + + #now unlink file2 and check the pgfid of file1 + #1. no. of pgfid should be one + #2. no. 
of hard link should be one + TEST unlink $M0/dir2/file2 + + LINES=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir1/file1 2>&1 | grep "trusted.pgfid" | wc -l` + TEST [ $LINES == 1 ] + + #next to check is to whether they contain hard link value of one or not + HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir1/file1 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2` + TEST [ $HL = "0x00000001" ] + + #rename file under same directory + + TEST touch $M0/r_file1 + PGFID_rfile1=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/r_file1 2>&1 | grep "trusted.pgfid"` + + #cross check whether hard link count is one + HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/r_file1 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2` + + TEST [ $HL = "0x00000001" ] + + #now rename the file to r_file1 + TEST mv $M0/r_file1 $M0/r_file2 + + #now check the pgfid hard link count is still one or not + HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/r_file2 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2` + + TEST [ $HL = "0x00000001" ] + + #now move the file to a different directory where it has no hard link and check + TEST mkdir $M0/dir3; + TEST mv $M0/r_file2 $M0/dir3; + + #now check the pgfid has changed or not and hard limit is one or not + PGFID_newDir=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir3/r_file2 2>&1 | grep "trusted.pgfid"` + + #now the older pgfid and new pgfid shouldn't match + TEST [ $PGFID_rfile1 != $PGFID_newDir ] + + HL=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir3/r_file2 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2` + TEST [ $HL = "0x00000001" ] + + TEST touch $M0/dir1/rl_file_1 + ln $M0/dir1/rl_file_1 $M0/dir2/rl_file_2 + mv $M0/dir1/rl_file_1 $M0/dir2 + + #now the there should be just one pgfid for both files + for i in $(seq 1 2); do + NL=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_$i 2>&1 | grep "trusted.pgfid"|wc -l ` + TEST_IN_LOOP [ $HL = "0x00000001" ] + done + + #now pgfid of both files should match + P_rl_file_1=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_1 2>&1 | grep "trusted.pgfid"` + P_rl_file_2=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_2 2>&1 | grep "trusted.pgfid"` + TEST [ $P_rl_file_1 = $P_rl_file_2 ] + + #now the no of hard link should be two for both rl_file_1 and rl_file_2 + for i in $(seq 1 2); do + HL=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_$i 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2` + TEST_IN_LOOP [ $HL = "0x00000002" ] + done + + TEST rm -rf $M0/* +} + +__init; +links_in_same_directory; +links_across_directories; + +cleanup diff --git a/tests/bugs/protocol/bug-762989.t b/tests/bugs/protocol/bug-762989.t new file mode 100755 index 00000000000..1607fcf57f8 --- /dev/null +++ b/tests/bugs/protocol/bug-762989.t @@ -0,0 +1,40 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +# Skip the entire test if ip_local_reserved_ports does not exist +if [ ! 
-f /proc/sys/net/ipv4/ip_local_reserved_ports ] ; then + echo "Skip test on /proc/sys/net/ipv4/ip_local_reserved_ports, "\ + "which does not exists on this system" >&2 + SKIP_TESTS + exit 0 +fi + +## reserve port 1023 +older_ports=$(cat /proc/sys/net/ipv4/ip_local_reserved_ports); +echo "1023" > /proc/sys/net/ipv4/ip_local_reserved_ports; + +## Start and create a volume +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +TEST $CLI volume start $V0; + +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \ +$M0; + +## Wait for volume to register with rpc.mountd +sleep 6; +## check if port 1023 (which has been reserved) is used by the gluster processes +op=$(netstat -ntp | grep gluster | grep -w 1023); +EXPECT "" echo $op; + +#set the reserved ports to the older values +echo $older_ports > /proc/sys/net/ipv4/ip_local_reserved_ports + +cleanup; diff --git a/tests/bugs/protocol/bug-808400-dist.t b/tests/bugs/protocol/bug-808400-dist.t new file mode 100755 index 00000000000..0df972585c0 --- /dev/null +++ b/tests/bugs/protocol/bug-808400-dist.t @@ -0,0 +1,32 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +MOUNTDIR=$M0; +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; + +build_tester $(dirname $0)/bug-808400-flock.c +build_tester $(dirname $0)/bug-808400-fcntl.c + +TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\' +TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\' + +TEST rm -rf $MOUNTDIR/* +TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR + +cleanup; diff --git a/tests/bugs/protocol/bug-808400-fcntl.c b/tests/bugs/protocol/bug-808400-fcntl.c new file mode 100644 index 00000000000..87a83f317b8 --- /dev/null +++ b/tests/bugs/protocol/bug-808400-fcntl.c @@ -0,0 +1,117 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef linux +#define fstat64(fd, st) fstat(fd, st) +#endif + +int +run_child (char *filename) +{ + int fd = -1, ret = -1; + struct flock lock = {0, }; + int ppid = 0; + + fd = open (filename, O_RDWR); + if (fd < 0) { + fprintf (stderr, "open failed (%s)\n", strerror (errno)); + goto out; + } + + ppid = getppid (); + + lock.l_type = F_WRLCK; + lock.l_whence = SEEK_SET; + lock.l_start = 0; + lock.l_len = 0; + + ret = fcntl (fd, F_GETLK, &lock); + if (ret < 0) { + fprintf (stderr, "GETLK failed (%s)\n", strerror (errno)); + goto out; + } + + if ((lock.l_type == F_UNLCK) || + (ppid != lock.l_pid)) { + fprintf (stderr, "no locks present, though parent has held " + "one\n"); + ret = -1; + goto out; + } + + ret = 0; +out: + return ret; +} + +int +main (int argc, char *argv[]) +{ + int fd = -1, ret = -1, status = 0; + char *filename = NULL, *cmd = NULL; + struct stat stbuf = {0, }; + struct flock lock = {0, }; + + if (argc != 3) { + fprintf (stderr, "Usage: %s " + "\n", argv[0]); + goto out; + } + + filename = argv[1]; + cmd = argv[2]; + + fd = open (filename, 
O_RDWR | O_CREAT, 0); + if (fd < 0) { + fprintf (stderr, "open (%s) failed (%s)\n", filename, + strerror (errno)); + goto out; + } + + lock.l_type = F_WRLCK; + lock.l_whence = SEEK_SET; + lock.l_start = 0; + lock.l_len = 0; + + ret = fcntl (fd, F_SETLK, &lock); + if (ret < 0) { + fprintf (stderr, "fcntl failed (%s)\n", strerror (errno)); + goto out; + } + + system (cmd); + + /* wait till graph switch completes */ + ret = fstat64 (fd, &stbuf); + if (ret < 0) { + fprintf (stderr, "fstat64 failure (%s)\n", strerror (errno)); + goto out; + } + + sleep (10); + + /* By now old-graph would be disconnected and locks should be cleaned + * up if they are not migrated. Check that by trying to acquire a lock + * on a new fd opened by another process on same file. + */ + ret = fork (); + if (ret == 0) { + ret = run_child (filename); + } else { + wait (&status); + if (WIFEXITED(status)) { + ret = WEXITSTATUS(status); + } else { + ret = 0; + } + } + +out: + return ret; +} diff --git a/tests/bugs/protocol/bug-808400-flock.c b/tests/bugs/protocol/bug-808400-flock.c new file mode 100644 index 00000000000..bd2ce8cfb01 --- /dev/null +++ b/tests/bugs/protocol/bug-808400-flock.c @@ -0,0 +1,96 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef linux +#define fstat64(fd, st) fstat(fd, st) +#endif + +int +run_child (char *filename) +{ + int fd = -1, ret = -1; + + fd = open (filename, O_RDWR); + if (fd < 0) { + fprintf (stderr, "open failed (%s)\n", strerror (errno)); + goto out; + } + + ret = flock (fd, LOCK_EX | LOCK_NB); + if ((ret == 0) || (errno != EWOULDBLOCK)) { + fprintf (stderr, "no locks present, though parent has held " + "one\n"); + ret = -1; + goto out; + } + + ret = 0; +out: + return ret; +} + +int +main (int argc, char *argv[]) +{ + int fd = -1, ret = -1, status = 0; + char *filename = NULL, *cmd = NULL; + struct stat stbuf = {0, }; + + if (argc != 3) { + fprintf (stderr, "Usage: %s " + "\n", argv[0]); + goto out; + } + + filename = argv[1]; + cmd = argv[2]; + + fd = open (filename, O_RDWR | O_CREAT, 0); + if (fd < 0) { + fprintf (stderr, "open (%s) failed (%s)\n", filename, + strerror (errno)); + goto out; + } + + ret = flock (fd, LOCK_EX); + if (ret < 0) { + fprintf (stderr, "flock failed (%s)\n", strerror (errno)); + goto out; + } + + system (cmd); + + /* wait till graph switch completes */ + ret = fstat64 (fd, &stbuf); + if (ret < 0) { + fprintf (stderr, "fstat64 failure (%s)\n", strerror (errno)); + goto out; + } + + sleep (10); + + /* By now old-graph would be disconnected and locks should be cleaned + * up if they are not migrated. Check that by trying to acquire a lock + * on a new fd opened by another process on same file + */ + ret = fork (); + if (ret == 0) { + ret = run_child (filename); + } else { + wait (&status); + if (WIFEXITED(status)) { + ret = WEXITSTATUS(status); + } else { + ret = 0; + } + } + +out: + return ret; +} diff --git a/tests/bugs/protocol/bug-808400-repl.t b/tests/bugs/protocol/bug-808400-repl.t new file mode 100755 index 00000000000..611e5ec93b7 --- /dev/null +++ b/tests/bugs/protocol/bug-808400-repl.t @@ -0,0 +1,31 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +MOUNTDIR=$M0; +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; + +build_tester $(dirname $0)/bug-808400-flock.c +build_tester $(dirname $0)/bug-808400-fcntl.c + +TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\' +TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\' + +TEST rm -rf $MOUNTDIR/* +TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR + +cleanup; diff --git a/tests/bugs/protocol/bug-808400-stripe.t b/tests/bugs/protocol/bug-808400-stripe.t new file mode 100755 index 00000000000..6d6c7271852 --- /dev/null +++ b/tests/bugs/protocol/bug-808400-stripe.t @@ -0,0 +1,32 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 stripe 2 $H0:$B0/brick1 $H0:$B0/brick2; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +MOUNTDIR=$M0; +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; + +build_tester $(dirname $0)/bug-808400-flock.c +build_tester $(dirname $0)/bug-808400-fcntl.c + +TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\' +TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\' + +TEST rm -rf $MOUNTDIR/* +TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR + +cleanup; diff --git a/tests/bugs/protocol/bug-808400.t b/tests/bugs/protocol/bug-808400.t new file mode 100755 index 00000000000..4ae1722fca2 --- /dev/null +++ b/tests/bugs/protocol/bug-808400.t @@ -0,0 +1,35 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/brick1; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +#mount on a random dir +TEST MOUNTDIR="/tmp/$RANDOM" +TEST mkdir $MOUNTDIR +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; + +build_tester $(dirname $0)/bug-808400-flock.c +build_tester $(dirname $0)/bug-808400-fcntl.c + +TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\' +TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\' + +TEST rm -rf $MOUNTDIR/* +TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR +TEST rm -rf $MOUNTDIR + +cleanup; diff --git a/tests/bugs/quick-read/bug-846240.t b/tests/bugs/quick-read/bug-846240.t new file mode 100644 index 00000000000..c47040de1d1 --- /dev/null +++ b/tests/bugs/quick-read/bug-846240.t @@ -0,0 +1,58 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../fileio.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + +TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +MOUNTDIR=$M0; +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR; +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M1; + +TEST touch $M0/testfile; + +# open the file with the fd as 4 +TEST fd=`fd_available`; +TEST fd_open $fd 'w' "$M0/testfile"; + +# remove the file from the other mount point. If unlink is sent from +# $M0 itself, then the file will be actually opened by open-behind which +# we dont want for this testcase +TEST rm -f $M1/testfile; + +# below command opens the file and writes to the file. +# upon open, open-behind unwinds the open call with success. +# now when write comes, open-behind actually opens the file +# and then sends write on the fd. But before sending open itself, +# the file would have been removed from the mount $M1. open() gets error +# and the write call which is put into a stub (open had to be sent first) +# should unwind with the error received in the open call. +echo "data" >> $M0/testfile 2>/dev/null 1>/dev/null; +TEST [ $? -ne 0 ] + +TEST fd_close $fd; + +TEST rm -rf $MOUNTDIR/* + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR + +cleanup; diff --git a/tests/bugs/quota/afr-quota-xattr-mdata-heal.t b/tests/bugs/quota/afr-quota-xattr-mdata-heal.t new file mode 100644 index 00000000000..b7a15a31f66 --- /dev/null +++ b/tests/bugs/quota/afr-quota-xattr-mdata-heal.t @@ -0,0 +1,138 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +TEST $CLI volume quota $V0 enable +TEST $CLI volume quota $V0 limit-usage / 1MB +TEST mkdir $M0/d +TEST $CLI volume quota $V0 limit-usage /d 1MB +TEST touch $M0/d/a +echo abc > $M0/d/a +#Set the acl xattrs directly on backend, for some reason on mount it gives error +acl_access_val="0x0200000001000600ffffffff04000400ffffffff10000400ffffffff20000400ffffffff" +acl_file_val="0x0000000400000001ffffffff0006000000000004ffffffff0004000000000010ffffffff0004000000000020ffffffff00040000" +TEST setfattr -n system.posix_acl_access -v $acl_access_val $B0/${V0}0/d +TEST setfattr -n trusted.SGI_ACL_FILE -v $acl_file_val $B0/${V0}0/d +TEST setfattr -n system.posix_acl_access -v $acl_access_val $B0/${V0}1/d +TEST setfattr -n trusted.SGI_ACL_FILE -v $acl_file_val $B0/${V0}1/d +TEST setfattr -n trusted.foo -v "baz" $M0/d +TEST setfattr -n trusted.foo -v "baz" $M0/d/a +TEST setfattr -n trusted.foo1 -v "baz1" $M0/d +TEST setfattr -n trusted.foo1 -v "baz1" $M0/d/a +TEST setfattr -n trusted.foo3 -v "unchanged" $M0/d +TEST setfattr -n trusted.foo3 -v "unchanged" $M0/d/a + +TEST kill_brick $V0 $H0 $B0/${V0}0 +#Induce metadata self-heal +TEST setfattr -n trusted.foo -v "bar" $M0/d +TEST setfattr -n trusted.foo -v "bar" $M0/d/a +TEST setfattr -x trusted.foo1 $M0/d +TEST setfattr -x trusted.foo1 $M0/d/a +TEST setfattr -n trusted.foo2 -v "bar2" $M0/d +TEST setfattr -n trusted.foo2 -v "bar2" $M0/d/a +d_quota_contri=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.*.contri") +d_quota_dirty=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.dirty") +d_quota_limit=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.limit-set") +d_quota_size=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.size") + +a_pgfid=$(getfattr -d -m . -e hex $B0/${V0}1/d/a | grep -E "trusted.pgfid.") + +#Change internal xattrs in the backend, later check that they are not healed +TEST setfattr -n trusted.glusterfs.quota.00000000-0000-0000-0000-000000000001.contri -v 0x0000000000000400 $B0/${V0}0/d +TEST setfattr -n trusted.glusterfs.quota.dirty -v 0x0000000000000400 $B0/${V0}0/d +TEST setfattr -n trusted.glusterfs.quota.limit-set -v 0x0000000000000400 $B0/${V0}0/d #This will be healed, this is external xattr +TEST setfattr -n trusted.glusterfs.quota.size -v 0x0000000000000400 $B0/${V0}0/d +TEST setfattr -n $(echo $a_pgfid | cut -f1 -d'=') -v "orphan" $B0/${V0}0/d/a + +TEST $CLI volume set $V0 cluster.self-heal-daemon on +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0 + +#Check external xattrs match +EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo) +EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo) +TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d +TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d/a +EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo3) +EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo3) +EXPECT "bar2" echo $(getfattr -d -m. 
-e text $B0/${V0}0/d | grep trusted.foo2) +EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo2) +EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}0/d | grep "trusted.glusterfs.quota.limit-set") + +EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo) +EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo) +TEST ! getfattr -n trusted.foo1 $B0/${V0}1/d +TEST ! getfattr -n trusted.foo1 $B0/${V0}1/d/a +EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo3) +EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo3) +EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo2) +EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo2) +EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep "trusted.glusterfs.quota.limit-set") + +#Test that internal xattrs on B0 are not healed +EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.00000000-0000-0000-0000-000000000001.contri) +EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.dirty) +EXPECT "$d_quota_limit" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.limit-set) #This will be healed, this is external xattr +EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.size) +EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep system.posix_acl_access) +EXPECT "$acl_file_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.SGI_ACL_FILE) +EXPECT "orphan" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep $(echo $a_pgfid | cut -f1 -d'=')) + +#Test that xattrs didn't go bad in source +EXPECT "$d_quota_contri" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.*.contri") +EXPECT "$d_quota_dirty" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.dirty") +EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.limit-set") +EXPECT "$d_quota_size" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.size") +EXPECT "$a_pgfid" echo $(getfattr -d -m . -e hex $B0/${V0}1/d/a | grep -E "trusted.pgfid.") +EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}1/d | grep system.posix_acl_access) +EXPECT "$acl_file_val" echo $(getfattr -d -m. -e hex $B0/${V0}1/d | grep trusted.SGI_ACL_FILE) + +#Do a lookup and it shouldn't trigger metadata self-heal and heal xattrs +EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo) +EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo) +TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d +TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d/a +EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo3) +EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo3) +EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo2) +EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo2) +EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}0/d | grep "trusted.glusterfs.quota.limit-set") + +EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo) +EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo) +TEST ! 
getfattr -n trusted.foo1 $B0/${V0}1/d +TEST ! getfattr -n trusted.foo1 $B0/${V0}1/d/a +EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo3) +EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo3) +EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo2) +EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo2) +EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep "trusted.glusterfs.quota.limit-set") + +#Test that internal xattrs on B0 are not healed +EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.00000000-0000-0000-0000-000000000001.contri) +EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.dirty) +EXPECT "$d_quota_limit" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.limit-set) #This will be healed, this is external xattr +EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.size) +EXPECT "orphan" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep $(echo $a_pgfid | cut -f1 -d'=')) + +#Test that xattrs didn't go bad in source +EXPECT "$d_quota_contri" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.*.contri") +EXPECT "$d_quota_dirty" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.dirty") +EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.limit-set") +EXPECT "$d_quota_size" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.size") +EXPECT "$a_pgfid" echo $(getfattr -d -m . -e hex $B0/${V0}1/d/a | grep -E "trusted.pgfid.") + +EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep system.posix_acl_access) +EXPECT "$acl_file_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.SGI_ACL_FILE) +EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}1/d | grep system.posix_acl_access) +EXPECT "$acl_file_val" echo $(getfattr -d -m. -e hex $B0/${V0}1/d | grep trusted.SGI_ACL_FILE) +cleanup diff --git a/tests/bugs/quota/bug-1023974.t b/tests/bugs/quota/bug-1023974.t new file mode 100644 index 00000000000..017a6decf88 --- /dev/null +++ b/tests/bugs/quota/bug-1023974.t @@ -0,0 +1,35 @@ +#!/bin/bash + +# This regression test tries to ensure renaming a directory with content, and +# no limit set, is accounted properly, when moved into a directory with quota +# limit set. + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6}; +TEST $CLI volume start $V0; + +TEST $CLI volume quota $V0 enable; + +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0; + +TEST mkdir -p $M0/1/2; +TEST $CLI volume quota $V0 limit-usage /1/2 100MB 70%; +TEST $CLI volume quota $V0 hard-timeout 0 +TEST $CLI volume quota $V0 soft-timeout 0 + +#The corresponding write(3) should fail with EDQUOT ("Disk quota exceeded") +TEST ! dd if=/dev/urandom of=$M0/1/2/file bs=1024k count=102; +TEST mkdir -p $M0/1/3; +TEST dd if=/dev/urandom of=$M0/1/3/file bs=1024k count=102; + +#The corresponding rename(3) should fail with EDQUOT ("Disk quota exceeded") +TEST ! 
mv $M0/1/3/ $M0/1/2/3_mvd; + +cleanup; diff --git a/tests/bugs/quota/bug-1035576.t b/tests/bugs/quota/bug-1035576.t new file mode 100644 index 00000000000..dd4f499d98e --- /dev/null +++ b/tests/bugs/quota/bug-1035576.t @@ -0,0 +1,52 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +#This script tests that self-heal of limit-set xattr is happening on a directory +#but self-heal of quota.size xattr is not happening + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume start $V0 +#Lets disable perf-xls so that lookup would reach afr +TEST $CLI volume set $V0 performance.quick-read off +TEST $CLI volume set $V0 performance.io-cache off +TEST $CLI volume set $V0 performance.write-behind off +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume set $V0 performance.read-ahead off +TEST $CLI volume set $V0 background-self-heal-count 0 +TEST $CLI volume set $V0 self-heal-daemon off +TEST $CLI volume quota $V0 enable + +TEST kill_brick $V0 $H0 $B0/${V0}0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +cd $M0 +TEST mkdir $M0/a +TEST $CLI volume quota $V0 limit-usage /a 1GB +echo abc > $M0/a/f +$CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +quota_limit_val1=$(get_hex_xattr trusted.glusterfs.quota.limit-set $B0/${V0}1/a) +quota_size_val1=$(get_hex_xattr trusted.glusterfs.quota.size $B0/${V0}1/a) + +#Trigger entry,metadata self-heal +TEST ls $M0/a + +quota_limit_val0=$(get_hex_xattr trusted.glusterfs.quota.limit-set $B0/${V0}0/a) +quota_size_val0=$(get_hex_xattr trusted.glusterfs.quota.size $B0/${V0}0/a) + +#Test that limit-set xattr is healed +TEST [ $quota_limit_val0 == $quota_limit_val1 ] + +#Only entry, metadata self-heal is done quota size value should not be same +TEST [ $quota_size_val0 != $quota_size_val1 ] +TEST cat $M0/a/f + +#Now that data self-heal is done quota size value should be same +quota_size_val0=$(get_hex_xattr trusted.glusterfs.quota.size $B0/${V0}0/a) +TEST [ $quota_size_val0 == $quota_size_val1 ] +cleanup diff --git a/tests/bugs/quota/bug-1038598.t b/tests/bugs/quota/bug-1038598.t new file mode 100644 index 00000000000..c51bc470ce1 --- /dev/null +++ b/tests/bugs/quota/bug-1038598.t @@ -0,0 +1,80 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; + +function hard_limit() +{ + local QUOTA_PATH=$1; + $CLI volume quota $V0 list $QUOTA_PATH | grep "$QUOTA_PATH" | awk '{print $2}' +} + +function soft_limit() +{ + local QUOTA_PATH=$1; + $CLI volume quota $V0 list $QUOTA_PATH | grep "$QUOTA_PATH" | awk '{print $3}' +} + +function usage() +{ + local QUOTA_PATH=$1; + $CLI volume quota $V0 list $QUOTA_PATH | grep "$QUOTA_PATH" | awk '{print $4}' +} + +function sl_exceeded() +{ + local QUOTA_PATH=$1; + $CLI volume quota $V0 list $QUOTA_PATH | grep "$QUOTA_PATH" | awk '{print $6}' +} + +function hl_exceeded() +{ + local QUOTA_PATH=$1; + $CLI volume quota $V0 list $QUOTA_PATH | grep "$QUOTA_PATH" | awk '{print $7}' + +} + +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT '2' brick_count $V0 + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST $CLI volume quota $V0 enable +sleep 5 + +TEST glusterfs -s $H0 --volfile-id $V0 $M0; + +TEST mkdir -p $M0/test_dir +TEST $CLI volume quota $V0 limit-usage /test_dir 10MB 50 + +EXPECT "10.0MB" hard_limit "/test_dir"; +EXPECT "50%" soft_limit "/test_dir"; + +TEST dd if=/dev/zero of=$M0/test_dir/file1.txt bs=1024k count=4 +EXPECT "4.0MB" usage "/test_dir"; +EXPECT 'No' sl_exceeded "/test_dir"; +EXPECT 'No' hl_exceeded "/test_dir"; + +TEST dd if=/dev/zero of=$M0/test_dir/file1.txt bs=1024k count=6 +EXPECT "6.0MB" usage "/test_dir"; +EXPECT 'Yes' sl_exceeded "/test_dir"; +EXPECT 'No' hl_exceeded "/test_dir"; + +#set timeout to 0 so that quota gets enforced without any lag +TEST $CLI volume set $V0 features.hard-timeout 0 +TEST $CLI volume set $V0 features.soft-timeout 0 + +TEST ! dd if=/dev/zero of=$M0/test_dir/file1.txt bs=1024k count=15 +EXPECT 'Yes' sl_exceeded "/test_dir"; +EXPECT 'Yes' hl_exceeded "/test_dir"; + +cleanup; diff --git a/tests/bugs/quota/bug-1040423.t b/tests/bugs/quota/bug-1040423.t new file mode 100755 index 00000000000..4e7b5642c94 --- /dev/null +++ b/tests/bugs/quota/bug-1040423.t @@ -0,0 +1,72 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup + +function _init() { +# Start glusterd +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +# Lets create volume +TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}; + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +#Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 + +#Enable Quota +TEST $CLI volume quota $V0 enable + +#As quotad consumes some time to connect to brick process we invoke sleep +sleep 10; + +#set limit of 1GB of quota on root +TEST $CLI volume quota $V0 limit-usage / 1GB +} + +function get_hardlimit() +{ + VOLUME=$1 + + $CLI volume quota $VOLUME list | tail -1 | sed "s/ \{1,\}/ /g" | + cut -d' ' -f 2 +} + +function check_fattrs { + +touch $M0/file1; + +#This confirms that pgfid is also filtered +TEST ! "getfattr -d -e hex -m . $M0/file1 | grep pgfid "; + +#just check for quota xattr are visible or not +TEST ! "getfattr -d -e hex -m . $M0 | grep quota"; + +#setfattr should fail +TEST ! setfattr -n trusted.glusterfs.quota.limit-set -v 10 $M0; + +#remove xattr should fail +TEST ! 
setfattr -x trusted.glusterfs.quota.limit-set $M0; + +#check if list command still shows the correct value or not + +EXPECT "1.0GB" get_hardlimit $V0 + +} + +_init; +check_fattrs; +cleanup + + + + diff --git a/tests/bugs/quota/bug-1049323.t b/tests/bugs/quota/bug-1049323.t new file mode 100755 index 00000000000..818c5f0e65b --- /dev/null +++ b/tests/bugs/quota/bug-1049323.t @@ -0,0 +1,64 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +function _init() +{ +# Start glusterd +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +#Create a volume +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; + +#Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +#Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 + +#Enable Quota +TEST $CLI volume quota $V0 enable + +##Wait for the auxiliary mount to comeup +sleep 3; +} + +function get_aux() +{ +##Check if a auxiliary mount is there +df -h | grep "/var/run/gluster/$V0" - + +if [ $? -eq 0 ] +then + echo "0" +else + echo "1" +fi +} + +function create_data() +{ +#set some limit on the volume +TEST $CLI volume quota $V0 limit-usage / 50MB; + +#Auxiliary mount should be there before stopping the volume +EXPECT "0" get_aux; + +TEST $CLI volume stop $V0; + +#Aux mount should have been removed +EXPECT "1" get_aux; + +} + + +_init; +create_data; +cleanup; diff --git a/tests/bugs/quota/bug-1087198.t b/tests/bugs/quota/bug-1087198.t new file mode 100644 index 00000000000..69ae18c7fe2 --- /dev/null +++ b/tests/bugs/quota/bug-1087198.t @@ -0,0 +1,77 @@ +#!/bin/bash + +## The script tests the logging of the quota in the bricks after reaching soft +## limit of the configured limit. +## +## Steps: +## 1. Create and mount the volume +## 2. Enable quota and set the limit on 2 directories +## 3. Write some data to cross the limit +## 4. Grep the string expected in brick logs +## 5. Wait for 10 seconds (alert timeout is set to 10s) +## 6. Repeat 3 and 4. +## 7. Cleanup + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../fileio.rc +. $(dirname $0)/../../volume.rc +. 
$(dirname $0)/../../nfs.rc + +cleanup; + +#1 +## Step 1 +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/brick{1..4}; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 noac,nolock + + +QUOTA_LIMIT_DIR="quota_limit_dir" +BRICK_LOG_DIR="`gluster --print-logdir`/bricks" + +#9 +TEST mkdir $N0/$QUOTA_LIMIT_DIR + +#10 +## Step 2 +TEST $CLI volume quota $V0 enable +TEST $CLI volume quota $V0 alert-time 10 +TEST $CLI volume quota $V0 hard-timeout 0 +TEST $CLI volume quota $V0 soft-timeout 0 +TEST $CLI volume quota $V0 limit-usage / 200KB +TEST $CLI volume quota $V0 limit-usage /$QUOTA_LIMIT_DIR 100KB + +#16 +## Step 3 and 4 +TEST dd if=/dev/urandom of=$N0/$QUOTA_LIMIT_DIR/95KB_file bs=1k count=95 +TEST grep -e "\"Usage crossed soft limit:.*used by /$QUOTA_LIMIT_DIR\"" -- $BRICK_LOG_DIR/* + +TEST dd if=/dev/urandom of=$N0/100KB_file bs=1k count=100 +TEST grep -e "\"Usage crossed soft limit:.*used by /\"" -- $BRICK_LOG_DIR/* + +#20 +## Step 5 +TEST sleep 10 + +## Step 6 +TEST dd if=/dev/urandom of=$N0/$QUOTA_LIMIT_DIR/1KB_file bs=1k count=1 +TEST grep -e "\"Usage is above soft limit:.*used by /$QUOTA_LIMIT_DIR\"" -- $BRICK_LOG_DIR/* + +#23 +TEST dd if=/dev/urandom of=$N0/1KB_file bs=1k count=1 +TEST grep -e "\"Usage is above soft limit:.*used by /\"" -- $BRICK_LOG_DIR/* + +#25 +## Step 7 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +cleanup; diff --git a/tests/bugs/quota/bug-1100050.t b/tests/bugs/quota/bug-1100050.t new file mode 100644 index 00000000000..e12f64f88d8 --- /dev/null +++ b/tests/bugs/quota/bug-1100050.t @@ -0,0 +1,25 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd; +TEST pidof glusterd; + +TEST gluster volume create $V0 stripe 2 $H0:$B0/{1,2} force; +TEST gluster volume start $V0; +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; + +TEST gluster volume quota $V0 enable; + +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" quotad_up_status; + +TEST mkdir $M0/dir; + +TEST gluster volume quota $V0 limit-usage /dir 10MB; + +TEST mkdir $M0/dir/subdir; + +cleanup; diff --git a/tests/bugs/quota/bug-1104692.t b/tests/bugs/quota/bug-1104692.t new file mode 100755 index 00000000000..6f6b174aa03 --- /dev/null +++ b/tests/bugs/quota/bug-1104692.t @@ -0,0 +1,32 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3 +TEST $CLI volume start $V0 + +TEST glusterfs -s $H0 --volfile-id $V0 $M0; +TEST mkdir -p $M0/limit_one/limit_two/limit_three $M0/limit_four \ + $M0/limit_one/limit_five + +TEST $CLI volume set $V0 server.root-squash on +TEST $CLI volume quota $V0 enable + +TEST $CLI volume quota $V0 limit-usage / 1GB +TEST $CLI volume quota $V0 limit-usage /limit_one 1GB +TEST $CLI volume quota $V0 limit-usage /limit_one/limit_two 1GB +TEST $CLI volume quota $V0 limit-usage /limit_one/limit_two/limit_three 1GB +TEST $CLI volume quota $V0 limit-usage /limit_four 1GB +TEST $CLI volume quota $V0 limit-usage /limit_one/limit_five 1GB + +#Cleanup +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/rdma/bug-765473.t b/tests/bugs/rdma/bug-765473.t new file mode 100755 index 00000000000..9f595a1d479 --- /dev/null +++ b/tests/bugs/rdma/bug-765473.t @@ -0,0 +1,35 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../fileio.rc + +cleanup; + +function clients_connected() +{ + volname=$1 + gluster volume status $volname clients | grep -i 'Clients connected' | sed -e 's/[^0-9]*\(.*\)/\1/g' +} + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 $H0:$B0/${V0}1 +TEST $CLI volume start $V0; + +TEST glusterfs --direct-io-mode=yes --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +TEST fd=`fd_available` +TEST fd_open $fd 'w' "$M0/testfile" +TEST fd_write $fd "content" +TEST $CLI volume stop $V0 +# write some content which will result in marking fd bad +fd_write $fd "more content" +sync $V0 +TEST $CLI volume start $V0 +EXPECT 'Started' volinfo_field $V0 'Status'; +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 clients_connected $V0 +TEST ! fd_write $fd "still more content" + +cleanup diff --git a/tests/bugs/replicate/886998/strict-readdir.t b/tests/bugs/replicate/886998/strict-readdir.t new file mode 100644 index 00000000000..63fe313b201 --- /dev/null +++ b/tests/bugs/replicate/886998/strict-readdir.t @@ -0,0 +1,52 @@ +#!/bin/bash + +. $(dirname $0)/../../../include.rc +. 
$(dirname $0)/../../../volume.rc + +function num_files_in_dir { + d=$1 + ls $d | sort | uniq | wc -l +} + +#Basic sanity tests for readdir functionality +cleanup; +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/r2d2_0 $H0:$B0/r2d2_1 $H0:$B0/r2d2_2 $H0:$B0/r2d2_3 +TEST $CLI volume start $V0 +TEST glusterfs --volfile-server=$H0 --volfile-id=/$V0 $M0 + +TEST touch $M0/{1..100} +EXPECT "100" num_files_in_dir $M0 + +TEST kill_brick $V0 $H0 $B0/r2d2_0 +TEST kill_brick $V0 $H0 $B0/r2d2_2 +EXPECT "100" num_files_in_dir $M0 + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2 + +TEST kill_brick $V0 $H0 $B0/r2d2_1 +TEST kill_brick $V0 $H0 $B0/r2d2_3 +EXPECT "100" num_files_in_dir $M0 + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 3 + +TEST $CLI volume set $V0 cluster.strict-readdir on +EXPECT "on" volinfo_field $V0 cluster.strict-readdir +TEST kill_brick $V0 $H0 $B0/r2d2_0 +TEST kill_brick $V0 $H0 $B0/r2d2_2 +EXPECT "100" num_files_in_dir $M0 + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2 + +TEST kill_brick $V0 $H0 $B0/r2d2_1 +TEST kill_brick $V0 $H0 $B0/r2d2_3 +EXPECT "100" num_files_in_dir $M0 +cleanup; diff --git a/tests/bugs/replicate/bug-1015990-rep.t b/tests/bugs/replicate/bug-1015990-rep.t new file mode 100755 index 00000000000..4e959e6e70e --- /dev/null +++ b/tests/bugs/replicate/bug-1015990-rep.t @@ -0,0 +1,80 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../afr.rc +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + + +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + + + +TEST kill_brick $V0 $H0 $B0/$V0"1" +sleep 5 +TEST kill_brick $V0 $H0 $B0/$V0"3" +sleep 5 + +for i in {1..100}; do echo "STRING" > $M0/File$i; done + +brick_2_sh_entries=$(count_sh_entries $B0/$V0"2") +brick_4_sh_entries=$(count_sh_entries $B0/$V0"4") + +command_output=$(gluster volume heal $V0 statistics heal-count replica $H0:$B0/$V0"1") + + +substring="Number of entries:" +count=0 +while read -r line; +do + if [[ "$line" == *$substring* ]] + then + value=$(echo $line | cut -f 2 -d :) + count=$(($count + $value)) + fi + +done <<< "$command_output" + +brick_2_entries_count=$(($count-$value)) + +EXPECT "0" echo $brick_2_entries_count + +brick_2_entries_count=$count + + +xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2") +##Remove the count of the xattrop-gfid entry count as it does not contribute +##to the number of files to be healed + +sub_val=1 +xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val)) + +ret=0 +if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ] + then + ret=$(($ret + $sub_val)) +fi + +EXPECT "1" echo $ret +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; +TEST $CLI volume delete $V0; +TEST ! 
$CLI volume info $V0 + +cleanup; diff --git a/tests/bugs/replicate/bug-1015990.t b/tests/bugs/replicate/bug-1015990.t new file mode 100755 index 00000000000..48181c00329 --- /dev/null +++ b/tests/bugs/replicate/bug-1015990.t @@ -0,0 +1,95 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../afr.rc +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + + +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + + + +TEST kill_brick $V0 $H0 $B0/$V0"1" +sleep 5 +TEST kill_brick $V0 $H0 $B0/$V0"3" +sleep 5 + +for i in {1..100}; do echo "STRING" > $M0/File$i; done + +brick_2_sh_entries=$(count_sh_entries $B0/$V0"2") +brick_4_sh_entries=$(count_sh_entries $B0/$V0"4") + + +command_output=$(gluster volume heal $V0 statistics heal-count) + + +substring="Number of entries:" +count=0 +while read -r line; +do + if [[ "$line" == *$substring* ]] + then + value=$(echo $line | cut -f 2 -d :) + count=$(($count + $value)) + fi + +done <<< "$command_output" + +brick_2_entries_count=$(($count-$value)) +brick_4_entries_count=$value + + +xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2") +##Remove the count of the xattrop-gfid entry count as it does not contribute +##to the number of files to be healed + +sub_val=1 +xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val)) + +xattrop_count_brick_4=$(count_sh_entries $B0/$V0"4") +##Remove xattrop-gfid entry count + +xattrop_count_brick_4=$(($xattrop_count_brick_4-$sub_val)) + + +ret=0 +if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ] + then + ret=$(($ret + $sub_val)) +fi + +EXPECT "1" echo $ret + + +ret=0 +if [ "$xattrop_count_brick_4" -eq "$brick_4_entries_count" ] + then + ret=$(($ret + $sub_val)) +fi + +EXPECT "1" echo $ret + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0 + +cleanup; + diff --git a/tests/bugs/replicate/bug-1032927.t b/tests/bugs/replicate/bug-1032927.t new file mode 100644 index 00000000000..eb663d03fed --- /dev/null +++ b/tests/bugs/replicate/bug-1032927.t @@ -0,0 +1,32 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +#This tests if pathinfo getxattr fails when one of the bricks is down +#Lets hope it doesn't + +cleanup; +function get_pathinfo_in_loop { + failed=0 + for i in {1..1000} + do + getfattr -n trusted.glusterfs.pathinfo $M0 2>/dev/null + if [ $? 
-ne 0 ]; then failed=1;break; fi + done + return $failed +} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +cd $M0 +TEST kill_brick $V0 $H0 $B0/${V0}1 + +#when one of the bricks is down getfattr of pathinfo should not fail +#Lets just do the test for 1000 times to see if we hit the race +TEST get_pathinfo_in_loop + +cleanup diff --git a/tests/bugs/replicate/bug-1037501.t b/tests/bugs/replicate/bug-1037501.t new file mode 100755 index 00000000000..ce079555b50 --- /dev/null +++ b/tests/bugs/replicate/bug-1037501.t @@ -0,0 +1,104 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +function write_file() +{ + path="$1"; shift + echo "$*" > "$path" +} + +cleanup; +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +## Start and create a volume +mkdir -p ${B0}/${V0}-0 +mkdir -p ${B0}/${V0}-1 +mkdir -p ${B0}/${V0}-2 +TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2} + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Mount native +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 + +TEST `echo "TEST-FILE" > $M0/File` +TEST `mkdir $M0/Dir` +TEST `ln $M0/File $M0/Link` +TEST `mknod $M0/FIFO p` + +TEST $CLI volume add-brick $V0 replica 4 $H0:$B0/$V0-3 force +TEST $CLI volume add-brick $V0 replica 5 $H0:$B0/$V0-4 force +TEST $CLI volume add-brick $V0 replica 6 $H0:$B0/$V0-5 force + +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5 +TEST gluster volume heal $V0 full +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/File +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/File +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/File +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/File +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/File +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/File + +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/Link +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/Link +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/Link +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/Link +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/Link +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/Link + +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/Dir +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/Dir +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/Dir +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/Dir +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/Dir +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/Dir + +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/FIFO +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/FIFO +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/FIFO +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/FIFO +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/FIFO +EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/FIFO + +EXPECT 10 stat -c '%s' $B0/$V0-0/File +EXPECT 10 stat -c '%s' $B0/$V0-1/File +EXPECT 10 stat 
-c '%s' $B0/$V0-2/File +EXPECT 10 stat -c '%s' $B0/$V0-3/File +EXPECT 10 stat -c '%s' $B0/$V0-4/File +EXPECT 10 stat -c '%s' $B0/$V0-5/File + +EXPECT 3 stat -c '%h' $B0/$V0-0/Link +EXPECT 3 stat -c '%h' $B0/$V0-1/Link +EXPECT 3 stat -c '%h' $B0/$V0-2/Link +EXPECT 3 stat -c '%h' $B0/$V0-3/Link +EXPECT 3 stat -c '%h' $B0/$V0-4/Link +EXPECT 3 stat -c '%h' $B0/$V0-5/Link + +EXPECT 'directory' stat -c '%F' $B0/$V0-0/Dir +EXPECT 'directory' stat -c '%F' $B0/$V0-1/Dir +EXPECT 'directory' stat -c '%F' $B0/$V0-2/Dir +EXPECT 'directory' stat -c '%F' $B0/$V0-3/Dir +EXPECT 'directory' stat -c '%F' $B0/$V0-4/Dir +EXPECT 'directory' stat -c '%F' $B0/$V0-5/Dir + +EXPECT 'fifo' stat -c '%F' $B0/$V0-0/FIFO +EXPECT 'fifo' stat -c '%F' $B0/$V0-1/FIFO +EXPECT 'fifo' stat -c '%F' $B0/$V0-2/FIFO +EXPECT 'fifo' stat -c '%F' $B0/$V0-3/FIFO +EXPECT 'fifo' stat -c '%F' $B0/$V0-4/FIFO +EXPECT 'fifo' stat -c '%F' $B0/$V0-5/FIFO + +cleanup; diff --git a/tests/bugs/replicate/bug-1046624.t b/tests/bugs/replicate/bug-1046624.t new file mode 100755 index 00000000000..9ae40879228 --- /dev/null +++ b/tests/bugs/replicate/bug-1046624.t @@ -0,0 +1,46 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; +TEST glusterd +TEST pidof glusterd + +## Start and create a volume +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + + +## Make sure automatic self-heal doesn't perturb our results. +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 stat-prefetch off +TEST $CLI volume set $V0 background-self-heal-count 0 + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Mount native +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 --use-readdirp=no + +TEST `echo "TEST-FILE" > $M0/File` +TEST `mkdir $M0/Dir` +TEST kill_brick $V0 $H0 $B0/${V0}-0 + +TEST `ln -s $M0/File $M0/Link1` +TEST `ln -s $M0/Dir $M0/Link2` + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 + +TEST `find $M0/ 2>/dev/null 1>/dev/null` +TEST `find $M0/ | xargs stat 2>/dev/null 1>/dev/null` + +TEST stat $B0/${V0}-0/Link1 +TEST stat $B0/${V0}-0/Link2 + +cleanup; diff --git a/tests/bugs/replicate/bug-1058797.t b/tests/bugs/replicate/bug-1058797.t new file mode 100644 index 00000000000..99ab3eb3a66 --- /dev/null +++ b/tests/bugs/replicate/bug-1058797.t @@ -0,0 +1,45 @@ +#!/bin/bash +#Test that the setuid bit is healed correctly. + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; +#Basic checks +TEST glusterd + +#Create a 1x2 replica volume +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}; +TEST $CLI volume start $V0 +TEST $CLI volume set $V0 cluster.self-heal-daemon off + +# FUSE mount;create a file +TEST glusterfs -s $H0 --volfile-id $V0 $M0 +TEST touch $M0/file + +#Kill brick1 and set S_ISUID and S_ISGID bits from mount point +kill_brick $V0 $H0 $B0/brick1 +TEST chmod +x,+s $M0/file + +#Get file permissions from backend brick0 and verify that S_ISUID is indeed set +file_permissions1=`ls -l $B0/brick0/file | awk '{print $1}'| cut -d. 
-f1 | cut -d- -f2,3,4,5,6` +setuid_bit1=`echo $file_permissions1 | cut -b3` +EXPECT "s" echo $setuid_bit1 + +#Restart volume and do lookup from mount to trigger heal +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 +TEST dd if=$M0/file of=/dev/null + +#Get file permissions from healed brick1 and verify that S_ISUID is indeed set +file_permissions2=`ls -l $B0/brick1/file | awk '{print $1}' | cut -d. -f1 | cut -d- -f2,3,4,5,6` +setuid_bit2=`echo $file_permissions2 | cut -b3` +EXPECT "s" echo $setuid_bit2 + +#Also compare the entire permission string,just to be sure +EXPECT $file_permissions1 echo $file_permissions2 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0; + +cleanup; diff --git a/tests/bugs/replicate/bug-1101647.t b/tests/bugs/replicate/bug-1101647.t new file mode 100644 index 00000000000..148af987f20 --- /dev/null +++ b/tests/bugs/replicate/bug-1101647.t @@ -0,0 +1,29 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../afr.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; +TEST $CLI volume start $V0; +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +EXPECT_WITHIN 20 "Y" glustershd_up_status + +#Create base entry in indices/xattrop and indices/base_indices_holder +echo "Data">$M0/file + +TEST $CLI volume heal $V0 +#Entries from indices/xattrop and indices/base_indices_holder should not be cleared after a heal. +EXPECT 1 count_sh_entries $B0/$V0"1" +EXPECT 1 count_sh_entries $B0/$V0"2" + +TEST kill_brick $V0 $H0 $B0/${V0}2 +echo "More data">>$M0/file + +EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count|grep "Number of entries:"|head -n1|awk '{print $4}'` + +cleanup; diff --git a/tests/bugs/replicate/bug-1130892.t b/tests/bugs/replicate/bug-1130892.t new file mode 100644 index 00000000000..0840ffbb0b9 --- /dev/null +++ b/tests/bugs/replicate/bug-1130892.t @@ -0,0 +1,60 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. 
$(dirname $0)/../../afr.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +# Create a 1X2 replica +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} +EXPECT 'Created' volinfo_field $V0 'Status'; + +# Disable self-heal daemon +TEST gluster volume set $V0 self-heal-daemon off + +# Disable all perf-xlators +TEST $CLI volume set $V0 performance.quick-read off +TEST $CLI volume set $V0 performance.io-cache off +TEST $CLI volume set $V0 performance.write-behind off +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume set $V0 performance.read-ahead off + +# Volume start +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +# FUSE Mount +TEST glusterfs -s $H0 --volfile-id $V0 $M0 + +# Create files and dirs +TEST mkdir -p $M0/one/two/ +TEST `echo "Carpe diem" > $M0/one/two/three` + +# Simulate disk-replacement +TEST kill_brick $V0 $H0 $B0/${V0}-1 +TEST rm -rf $B0/${V0}-1/one +TEST rm -rf $B0/${V0}-1/.glusterfs + +# Start force +TEST $CLI volume start $V0 force + +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 + +TEST stat $M0/one + +# Check pending xattrs +EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data +EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 entry +EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata + +TEST gluster volume set $V0 self-heal-daemon on +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one +EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two +EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two/three + +cleanup; diff --git a/tests/bugs/replicate/bug-1132102.t b/tests/bugs/replicate/bug-1132102.t new file mode 100644 index 00000000000..c7dbbf818aa --- /dev/null +++ b/tests/bugs/replicate/bug-1132102.t @@ -0,0 +1,28 @@ +#!/bin/bash + +#This tests that mknod and create fops mark necessary pending changelog +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +TEST kill_brick $V0 $H0 $B0/${V0}0 +cd $M0 +TEST mkfifo fifo +TEST mknod block b 0 0 +TEST touch a +EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 data +EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 entry +EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 metadata +EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 data +EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 entry +EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 metadata +EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 data +EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 entry +EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 metadata +cleanup diff --git a/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t new file mode 100644 index 00000000000..f43c7cea551 --- /dev/null +++ b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t @@ -0,0 +1,50 @@ +#!/bin/bash +#### Test iatt and user xattr heal from lookup code path #### + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2} +TEST $CLI volume start $V0 +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + +cd $M0 +TEST touch file +TEST setfattr -n user.attribute1 -v "value" $B0/brick0/file +TEST kill_brick $V0 $H0 $B0/brick2 +TEST chmod +x file +iatt=$(stat -c "%g:%u:%A" file) + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2 + +#Trigger metadataheal +TEST stat file + +#iattrs must be matching +iatt1=$(stat -c "%g:%u:%A" $B0/brick0/file) +iatt2=$(stat -c "%g:%u:%A" $B0/brick1/file) +iatt3=$(stat -c "%g:%u:%A" $B0/brick2/file) +EXPECT $iatt echo $iatt1 +EXPECT $iatt echo $iatt2 +EXPECT $iatt echo $iatt3 + +#xattrs must be matching +xatt1_cnt=$(getfattr -d $B0/brick0/file|wc|awk '{print $1}') +xatt2_cnt=$(getfattr -d $B0/brick1/file|wc|awk '{print $1}') +xatt3_cnt=$(getfattr -d $B0/brick2/file|wc|awk '{print $1}') +EXPECT "$xatt1_cnt" echo $xatt2_cnt +EXPECT "$xatt1_cnt" echo $xatt3_cnt + +#changelogs must be zero +xattr1=$(get_hex_xattr trusted.afr.$V0-client-2 $B0/brick0/file) +xattr2=$(get_hex_xattr trusted.afr.$V0-client-2 $B0/brick1/file) +EXPECT "000000000000000000000000" echo $xattr1 +EXPECT "000000000000000000000000" echo $xattr2 + +cd - +cleanup; diff --git a/tests/bugs/replicate/bug-1139230.t b/tests/bugs/replicate/bug-1139230.t new file mode 100644 index 00000000000..9ceac6c4f4e --- /dev/null +++ b/tests/bugs/replicate/bug-1139230.t @@ -0,0 +1,58 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. 
$(dirname $0)/../../afr.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +# Create a 1X2 replica +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} +EXPECT 'Created' volinfo_field $V0 'Status'; + +# Volume start +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +# FUSE Mount +TEST glusterfs -s $H0 --volfile-id $V0 $M0 + +TEST mkdir -p $M0/one + +# Kill a brick +TEST kill_brick $V0 $H0 $B0/${V0}-1 + +TEST `echo "A long" > $M0/one/two` + +# Start force +TEST $CLI volume start $V0 force + +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 + +EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one +EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two + +# Pending xattrs should be set for all the bricks once self-heal is done +# Check pending xattrs +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-0 +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.$V0-client-0 +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.$V0-client-1 +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.dirty +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.dirty + +TEST `echo "time ago" > $M0/one/three` + +# Pending xattrs should be set for all the bricks once transaction is done +# Check pending xattrs +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.$V0-client-0 +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.$V0-client-1 +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.$V0-client-0 +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.$V0-client-1 +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.dirty +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.dirty + +cleanup; diff --git a/tests/bugs/replicate/bug-765564.t b/tests/bugs/replicate/bug-765564.t new file mode 100644 index 00000000000..098d225018f --- /dev/null +++ b/tests/bugs/replicate/bug-765564.t @@ -0,0 +1,86 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +## Start and create a volume +mkdir -p ${B0}/${V0}-0 +mkdir -p ${B0}/${V0}-1 +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} + +TEST $CLI volume set $V0 performance.io-cache off; +TEST $CLI volume set $V0 performance.write-behind off; +TEST $CLI volume set $V0 performance.stat-prefetch off + +TEST $CLI volume start $V0; + +## Mount native +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 + +#returns success if 'olddir' is absent +#'olddir' must be absent in both replicas +function rm_succeeded () { + local dir1=$1 + [[ -d $H0:$B0/${V0}-0/$dir1 || -d $H0:$B0/${V0}-1/$dir1 ]] && return 0 + return 1 +} + +# returns successes if 'newdir' is present +#'newdir' must be present in both replicas +function mv_succeeded () { + local dir1=$1 + [[ -d $H0:$B0/${V0}-0/$dir1 && -d $H0:$B0/${V0}-1/$dir1 ]] && return 1 + return 0 +} + +# returns zero on success +# Only one of rm and mv can succeed. 
This is captured by the XOR below + +function chk_backend_consistency(){ + local dir1=$1 + local dir2=$2 + local rm_status=rm_succeeded $dir1 + local mv_status=mv_succeeded $dir2 + [[ ( $rm_status && ! $mv_status ) || ( ! $rm_status && $mv_status ) ]] && return 0 + return 1 +} + +#concurrent removal/rename of dirs +function rm_mv_correctness () { + ret=0 + for i in {1..100}; do + mkdir $M0/"dir"$i + rmdir $M0/"dir"$i & + mv $M0/"dir"$i $M0/"adir"$i & + wait + tmp_ret=$(chk_backend_consistency "dir"$i "adir"$i) + (( ret += tmp_ret )) + rm -rf $M0/"dir"$i + rm -rf $M0/"adir"$i + done + return $ret +} + +TEST touch $M0/a; +TEST mv $M0/a $M0/b; + +#test rename fop when one of the bricks is down +kill_brick ${V0} ${H0} ${B0}/${V0}-1; +TEST touch $M0/h; +TEST mv $M0/h $M0/1; + +TEST $CLI volume start $V0 force; + +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1; +find $M0 2>/dev/null 1>/dev/null; +find $M0 | xargs stat 2>/dev/null 1>/dev/null; + +TEST rm_mv_correctness; +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +cleanup; + diff --git a/tests/bugs/replicate/bug-767585-gfid.t b/tests/bugs/replicate/bug-767585-gfid.t new file mode 100755 index 00000000000..4176aabb544 --- /dev/null +++ b/tests/bugs/replicate/bug-767585-gfid.t @@ -0,0 +1,42 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +#Test cases to perform gfid-self-heal +#file 'a' should be assigned a fresh gfid +#file 'b' should be healed with gfid1 from brick1 +#file 'c' should be healed with gfid2 from brick2 + +gfid1="0x8428b7193a764bf8be8046fb860b8993" +gfid2="0x85ad91afa2f74694bf52c3326d048209" + +cleanup; +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable +touch $B0/${V0}0/a $B0/${V0}1/a +touch $B0/${V0}0/b $B0/${V0}1/b +touch $B0/${V0}0/c $B0/${V0}1/c + +TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/b +TEST setfattr -n trusted.gfid -v $gfid2 $B0/${V0}1/c + +sleep 2 + +TEST stat $M0/a +TEST stat $M0/b +TEST stat $M0/c + +TEST gf_get_gfid_xattr $B0/${V0}0/a +TEST gf_get_gfid_xattr $B0/${V0}1/a + +EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}0/b +EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}1/b + +EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}0/c +EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}1/c + +cleanup; diff --git a/tests/bugs/replicate/bug-802417.t b/tests/bugs/replicate/bug-802417.t new file mode 100755 index 00000000000..ad411005ced --- /dev/null +++ b/tests/bugs/replicate/bug-802417.t @@ -0,0 +1,108 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +function write_file() +{ + path="$1"; shift + echo "$*" > "$path" +} + +cleanup; +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +## Start and create a volume +mkdir -p ${B0}/${V0}-0 +mkdir -p ${B0}/${V0}-1 +mkdir -p ${B0}/${V0}-2 +TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2} + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Make sure io-cache and write-behind don't interfere. +TEST $CLI volume set $V0 performance.io-cache off; +TEST $CLI volume set $V0 performance.write-behind off; +TEST $CLI volume set $V0 performance.stat-prefetch off + +## Make sure automatic self-heal doesn't perturb our results. 
+TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 cluster.data-self-heal on +TEST $CLI volume set $V0 cluster.background-self-heal-count 0 + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Mount native +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 + +## Create a file with some recognizably stale data. +TEST write_file $M0/a_file "old_data" + +## Kill two of the bricks and write some newer data. +TEST kill_brick ${V0} ${H0} ${B0}/${V0}-1 +TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2 +TEST write_file $M0/a_file "new_data" + +## Bring all the bricks up and kill one so we do a partial self-heal. +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2 +TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2 +TEST dd if=${M0}/a_file of=/dev/null + + +obs_path_0=${B0}/${V0}-0/a_file +obs_path_1=${B0}/${V0}-1/a_file +obs_path_2=${B0}/${V0}-2/a_file + +tgt_xattr_0="trusted.afr.${V0}-client-0" +tgt_xattr_1="trusted.afr.${V0}-client-1" +tgt_xattr_2="trusted.afr.${V0}-client-2" + +actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_0) +EXPECT "0x000000000000000000000000|^\$" echo $actual + +actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_1) +EXPECT "0x000000000000000000000000|^\$" echo $actual + +actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_2) +EXPECT "0x000000030000000000000000" echo $actual + +actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_0) +EXPECT "0x000000000000000000000000|^\$" echo $actual + +actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_1) +EXPECT "0x000000000000000000000000|^\$" echo $actual + +actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_2) +EXPECT "0x000000010000000000000000" echo $actual + +actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_0) +EXPECT "0x000000000000000000000000|^\$" echo $actual + +actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_1) +EXPECT "0x000000000000000000000000|^\$" echo $actual + +actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_2) +EXPECT "0x000000000000000000000000|^\$" echo $actual + +if [ "$EXIT_EARLY" = "1" ]; then + exit 0; +fi + +## Finish up +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/replicate/bug-821056.t b/tests/bugs/replicate/bug-821056.t new file mode 100644 index 00000000000..02a9c78b6f0 --- /dev/null +++ b/tests/bugs/replicate/bug-821056.t @@ -0,0 +1,52 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume set $V0 eager-lock off +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 performance.quick-read off +TEST $CLI volume set $V0 performance.open-behind off +TEST $CLI volume set $V0 performance.io-cache off +TEST $CLI volume set $V0 performance.write-behind on +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume set $V0 performance.read-ahead off +TEST $CLI volume set $V0 cluster.background-self-heal-count 0 +TEST $CLI volume start $V0 +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable +touch $M0/a + +#Open file with fd as 5 +exec 5>$M0/a +realpath=$(gf_get_gfid_backend_file_path $B0/${V0}0 "a") + +kill_brick $V0 $H0 $B0/${V0}0 +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 + +EXPECT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath" + +kill_brick $V0 $H0 $B0/${V0}0 +TEST gf_rm_file_and_gfid_link $B0/${V0}0 "a" +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +ls -l $M0/a 2>&1 > /dev/null #Make sure the file is re-created +EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath" +EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/a + +for i in {1..1024}; do + echo "open sesame" >&5 +done + +EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath" +#close the fd +exec 5>&- + +#Check that anon-fd based file is not leaking. +EXPECT_WITHIN $REOPEN_TIMEOUT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath" +cleanup; diff --git a/tests/bugs/replicate/bug-830665.t b/tests/bugs/replicate/bug-830665.t new file mode 100755 index 00000000000..3d2ec1145da --- /dev/null +++ b/tests/bugs/replicate/bug-830665.t @@ -0,0 +1,120 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +function recreate { + rm -rf $1 && mkdir -p $1 +} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +## Start and create a volume +recreate ${B0}/${V0}-0 +recreate ${B0}/${V0}-1 +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1} + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + +#EXPECT_WITHIN fails the test if the command it executes fails. This function +#returns "" when the file doesn't exist +function friendly_cat { + if [ ! -f $1 ]; + then + echo ""; + else + cat $1; + fi +} + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Make sure stat-prefetch doesn't prevent self-heal checks. +TEST $CLI volume set $V0 performance.stat-prefetch off; + +## Make sure automatic self-heal doesn't perturb our results. +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 cluster.background-self-heal-count 0 + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + + +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +## Mount NFS +TEST mount_nfs $H0:/$V0 $N0 nolock; + +## Create some files and directories +echo "test_data" > $N0/a_file; +mkdir $N0/a_dir; +echo "more_test_data" > $N0/a_dir/another_file; + +## Unmount and stop the volume. 
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 +TEST $CLI volume stop $V0; + +# Recreate the brick. Note that because of http://review.gluster.org/#change,4202 +# we need to preserve and restore the volume ID or else the brick (and thus the +# entire not-very-HA-any-more volume) won't start. When that bug is fixed, we can +# remove the [gs]etxattr calls. +volid=$(getfattr -e hex -n trusted.glusterfs.volume-id $B0/${V0}-0 2> /dev/null \ + | grep = | cut -d= -f2) +rm -rf $B0/${V0}-0; +mkdir $B0/${V0}-0; +setfattr -n trusted.glusterfs.volume-id -v $volid $B0/${V0}-0 + +## Restart and remount. Note that we use actimeo=0 so that the stat calls +## we need for self-heal don't get blocked by the NFS client. +TEST $CLI volume start $V0; +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 nolock,actimeo=0; + +## The Linux NFS client has a really charming habit of caching stuff right +## after mount, even though we set actimeo=0 above. Life would be much easier +## if NFS developers cared as much about correctness as they do about shaving +## a few seconds off of benchmarks. +ls -l $N0 &> /dev/null; +sleep 5; + +## Force entry self-heal. +TEST $CLI volume set $V0 cluster.self-heal-daemon on +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 +TEST gluster volume heal $V0 full +#ls -lR $N0 > /dev/null; + +## Do NOT check through the NFS mount here. That will force a new self-heal +## check, but we want to test whether self-heal already happened. + +## Make sure everything's in order on the recreated brick. +EXPECT_WITHIN $HEAL_TIMEOUT 'test_data' friendly_cat $B0/${V0}-0/a_file; +EXPECT_WITHIN $HEAL_TIMEOUT 'more_test_data' friendly_cat $B0/${V0}-0/a_dir/another_file; + +if [ "$EXIT_EARLY" = "1" ]; then + exit 0; +fi + +## Finish up +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/replicate/bug-853680.t b/tests/bugs/replicate/bug-853680.t new file mode 100755 index 00000000000..806c3d142a1 --- /dev/null +++ b/tests/bugs/replicate/bug-853680.t @@ -0,0 +1,53 @@ +#!/bin/bash +# +# Bug 853680 +# +# Test that io-threads least-rate-limit throttling functions as expected. Set +# a limit, perform a few operations with a least-priority mount and verify +# said operations take a minimum amount of time according to the limit. + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}1 +TEST $CLI volume start $V0 + +#Accept min val +TEST $CLI volume set $V0 performance.least-rate-limit 0 +#Accept some value in between +TEST $CLI volume set $V0 performance.least-rate-limit 1035 +#Accept max val INT_MAX +TEST $CLI volume set $V0 performance.least-rate-limit 2147483647 + +#Reject other values +TEST ! $CLI volume set $V0 performance.least-rate-limit 2147483648 +TEST ! $CLI volume set $V0 performace.least-rate-limit -8 +TEST ! $CLI volume set $V0 performance.least-rate-limit abc +TEST ! $CLI volume set $V0 performance.least-rate-limit 0.0 +TEST ! $CLI volume set $V0 performance.least-rate-limit -10.0 +TEST ! 
$CLI volume set $V0 performance.least-rate-limit 1% + +# set rate limit to 1 operation/sec +TEST $CLI volume set $V0 performance.least-rate-limit 1 + +# use client-pid=-1 for least priority mount +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --client-pid=-1 + +# create a few files and verify this takes more than a few seconds +date1=`date +%s` +TEST touch $M0/file{0..2} +date2=`date +%s` + +optime=$(($date2 - $date1)) +TEST [ $optime -ge 3 ] + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/replicate/bug-859581.t b/tests/bugs/replicate/bug-859581.t new file mode 100755 index 00000000000..d8b45a257a1 --- /dev/null +++ b/tests/bugs/replicate/bug-859581.t @@ -0,0 +1,53 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2} +EXPECT 'Created' volinfo_field $V0 'Status'; +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume start $V0 +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST glusterfs --direct-io-mode=yes --use-readdirp=no --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +mkdir -p $M0/dir1/dir2 + +TEST rm -f $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") +TEST rmdir $B0/${V0}1/dir1/dir2 + +TEST stat $M0/dir1/dir2 + +TEST [ -d $B0/${V0}1/dir1/dir2 ] +TEST [ ! -d $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") ] + +# Stop the volume to flush caches and force symlink recreation +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +EXPECT 'Stopped' volinfo_field $V0 'Status'; +TEST $CLI volume start $V0 +EXPECT 'Started' volinfo_field $V0 'Status'; +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +# Till now, protocol/server was not doing inode linking as part of readdirp. +# But pas part of user servicable snapshots patcth, changes to do inode linking +# in protocol/server in readdirp, were introduced. So now to make sure +# the gfid handle of dir1 is healed, explicit lookup has to be sent on it. +# Otherwise, whenever ls -l is done just on the mount point $M0, lookup on the +# entries received as part of readdirp, is not sent, because the inodes for +# those entries were linked as part of readdirp itself. i.e instead of doing +# "ls -l $M0", it has to be the below command. +ls -l $M0/dir1; + +TEST [ -h $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") ] + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup + diff --git a/tests/bugs/replicate/bug-865825.t b/tests/bugs/replicate/bug-865825.t new file mode 100755 index 00000000000..ffb2e0f6437 --- /dev/null +++ b/tests/bugs/replicate/bug-865825.t @@ -0,0 +1,82 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +## Start and create a volume +mkdir -p ${B0}/${V0}-0 +mkdir -p ${B0}/${V0}-1 +mkdir -p ${B0}/${V0}-2 +TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2} + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + + +## Verify volume is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Make sure io-cache and write-behind don't interfere. 
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0 +TEST $CLI volume set $V0 performance.io-cache off; +TEST $CLI volume set $V0 performance.quick-read off; +TEST $CLI volume set $V0 performance.write-behind off; +TEST $CLI volume set $V0 performance.stat-prefetch off + +## Make sure automatic self-heal doesn't perturb our results. +TEST $CLI volume set $V0 cluster.self-heal-daemon off + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Mount native +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 + +## Create a file with some recognizable contents. +echo "test_data" > $M0/a_file; + +## Unmount. +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +## Mess with the flags as though brick-0 accuses brick-2 while brick-1 is +## missing its brick-2 changelog altogether. +value=0x000000010000000000000000 +setfattr -n trusted.afr.${V0}-client-2 -v $value $B0/${V0}-0/a_file +setfattr -x trusted.afr.${V0}-client-2 $B0/${V0}-1/a_file +echo "wrong_data" > $B0/${V0}-2/a_file + +gluster volume set $V0 cluster.self-heal-daemon on +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 +gluster volume heal $V0 full + +## Make sure brick 2 now has the correct contents. +EXPECT_WITHIN $HEAL_TIMEOUT "test_data" cat $B0/${V0}-2/a_file + +if [ "$EXIT_EARLY" = "1" ]; then + exit 0; +fi + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/replicate/bug-880898.t b/tests/bugs/replicate/bug-880898.t new file mode 100644 index 00000000000..123e7e16425 --- /dev/null +++ b/tests/bugs/replicate/bug-880898.t @@ -0,0 +1,23 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2 +TEST $CLI volume start $V0 +pkill glusterfs +uuid="" +for line in $(cat $GLUSTERD_WORKDIR/glusterd.info) +do + if [[ $line == UUID* ]] + then + uuid=`echo $line | sed -r 's/^.{5}//'` + fi +done + +#Command execution should fail reporting that the bricks are not running. +TEST ! $CLI volume heal $V0 info + +cleanup; diff --git a/tests/bugs/replicate/bug-884328.t b/tests/bugs/replicate/bug-884328.t new file mode 100644 index 00000000000..acc8e542240 --- /dev/null +++ b/tests/bugs/replicate/bug-884328.t @@ -0,0 +1,12 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; +TEST glusterd +TEST pidof glusterd + +TEST check_option_help_presence "cluster.quorum-type" +TEST check_option_help_presence "cluster.quorum-count" +cleanup; diff --git a/tests/bugs/replicate/bug-886998.t b/tests/bugs/replicate/bug-886998.t new file mode 100644 index 00000000000..bcac235ff09 --- /dev/null +++ b/tests/bugs/replicate/bug-886998.t @@ -0,0 +1,52 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +# This tests that the replicate trash directory(.landfill) has following +# properties. +# Note: This is to have backward compatibility with 3.3 glusterfs +# In the latest releases this dir is present inside .glusterfs of brick. +# 1) lookup of trash dir fails +# 2) readdir does not show this directory +# 3) Self-heal does not do any self-heal of these directories. 
+gfid1="0xc2e75dde97f346e7842d1076a8e699f8" +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable + +TEST mkdir $B0/${V0}1/.landfill +TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}1/.landfill +TEST mkdir $B0/${V0}0/.landfill +TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/.landfill + +TEST ! stat $M0/.landfill +EXPECT "" echo $(ls -a $M0 | grep ".landfill") + +TEST rmdir $B0/${V0}0/.landfill +#Force a conservative merge and it should not create .landfill +TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/ +TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/ + +TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/ +TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/ + +EXPECT "" echo $(ls -a $M0 | grep ".landfill") +TEST ! stat $B0/${V0}0/.landfill +TEST stat $B0/${V0}1/.landfill + +#TEST that the dir is not deleted even when xattrs suggest to delete +TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/ +TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/ + +TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/ +TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/ + +EXPECT "" echo $(ls -a $M0 | grep ".landfill") +TEST ! stat $B0/${V0}0/.landfill +TEST stat $B0/${V0}1/.landfill +cleanup; diff --git a/tests/bugs/replicate/bug-888174.t b/tests/bugs/replicate/bug-888174.t new file mode 100644 index 00000000000..8c70265513d --- /dev/null +++ b/tests/bugs/replicate/bug-888174.t @@ -0,0 +1,62 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +#This tests if flush, fsync wakes up the delayed post-op or not. +#If it is not woken up, INODELK from the next command waits +#for post-op-delay secs. There would be pending changelog even after the command +#completes. + +cleanup; +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1 + +TEST $CLI volume set $V0 cluster.eager-lock on + +TEST $CLI volume set $V0 performance.flush-behind off +EXPECT "off" volume_option $V0 performance.flush-behind + +TEST $CLI volume set $V0 cluster.post-op-delay-secs 3 +EXPECT "3" volume_option $V0 cluster.post-op-delay-secs + +TEST $CLI volume start $V0 +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 + +#Check that INODELK MAX latency is not in the order of seconds +TEST gluster volume profile $V0 start +for i in {1..5} +do + echo hi > $M0/a +done +#Test if the MAX INODELK fop latency is of the order of seconds. +inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}") + +TEST [ -z $inodelk_max_latency ] + +TEST dd of=$M0/a if=/dev/urandom bs=1024k count=10 conv=fsync +#Check for no trace of pending changelog. Flush should make sure of it. +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.dirty +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.dirty + + +dd of=$M0/a if=/dev/urandom bs=1024k count=1024 2>/dev/null & +p=$! 
+#trigger graph switches, tests for fsync not leaving any pending flags +TEST $CLI volume set $V0 performance.quick-read off +TEST $CLI volume set $V0 performance.io-cache off +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume set $V0 performance.read-ahead off + +kill -TERM $p +#wait for dd to exit +wait > /dev/null 2>&1 + +#Goal is to check if there is permanent FOOL changelog +sleep 5 +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.dirty +EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.dirty + +cleanup; diff --git a/tests/bugs/replicate/bug-913051.t b/tests/bugs/replicate/bug-913051.t new file mode 100644 index 00000000000..1c218397276 --- /dev/null +++ b/tests/bugs/replicate/bug-913051.t @@ -0,0 +1,67 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../fileio.rc + +cleanup; + +#Test that afr opens the file on the bricks that were offline at the time of +# open after the brick comes online. This tests for writev, readv triggering +# open-fd-fix in afr. +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 performance.quick-read off +TEST $CLI volume set $V0 performance.open-behind off +TEST $CLI volume set $V0 performance.io-cache off +TEST $CLI volume set $V0 performance.write-behind off +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume set $V0 performance.read-ahead off +TEST $CLI volume set $V0 cluster.background-self-heal-count 0 +TEST $CLI volume start $V0 +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable +TEST kill_brick $V0 $H0 $B0/${V0}0 + +TEST mkdir $M0/dir +TEST touch $M0/dir/a +TEST touch $M0/dir/b +echo abc > $M0/dir/b + +TEST wfd=`fd_available` +TEST fd_open $wfd "w" $M0/dir/a +TEST rfd=`fd_available` +TEST fd_open $rfd "r" $M0/dir/b + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 + +#check that the files are not opned on brick-0 +TEST stat $M0/dir/a +realpatha=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/a") +EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpatha" +EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/a + +TEST stat $M0/dir/b +realpathb=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/b") +EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpathb" +EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/b + +#attempt self-heal so that the files are created on brick-0 + +TEST dd if=$M0/dir/a of=/dev/null bs=1024k +TEST dd if=$M0/dir/b of=/dev/null bs=1024k + +#trigger writev for attempting open-fd-fix in afr +TEST fd_write $wfd "open sesame" + +#trigger readv for attempting open-fd-fix in afr +TEST fd_cat $rfd + +EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpatha" +EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpathb" + +TEST fd_close $wfd +TEST fd_close $rfd +cleanup; diff --git a/tests/bugs/replicate/bug-916226.t b/tests/bugs/replicate/bug-916226.t new file mode 100644 index 00000000000..893905f9a47 --- /dev/null +++ b/tests/bugs/replicate/bug-916226.t @@ -0,0 +1,26 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3 +TEST $CLI volume set $V0 cluster.eager-lock on +TEST $CLI volume start $V0 + +## Mount FUSE +TEST glusterfs -s $H0 --volfile-id $V0 $M0; + +TEST mkdir $M0/dir{1..10}; +TEST touch $M0/dir{1..10}/files{1..10}; + +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:/$B0/${V0}5 + +TEST $CLI volume rebalance $V0 start force +EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 + +cleanup; diff --git a/tests/bugs/replicate/bug-918437-sh-mtime.t b/tests/bugs/replicate/bug-918437-sh-mtime.t new file mode 100644 index 00000000000..04ac02f6337 --- /dev/null +++ b/tests/bugs/replicate/bug-918437-sh-mtime.t @@ -0,0 +1,71 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +function get_mtime { + local f=$1 + stat $f | grep Modify | awk '{print $2 $3}' | cut -f1 -d'.' +} + +function file_exists { + if [ -f $1 ]; then echo "Y"; else echo "N"; fi +} +cleanup; + +## Tests if mtime is correct after self-heal. +TEST glusterd +TEST pidof glusterd +TEST mkdir -p $B0/gfs0/brick0{1,2} +TEST $CLI volume create $V0 replica 2 transport tcp $H0:$B0/gfs0/brick01 $H0:$B0/gfs0/brick02 +TEST $CLI volume set $V0 nfs.disable on +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume set $V0 cluster.background-self-heal-count 0 +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --direct-io-mode=enable +# file 'a' is healed from brick02 to brick01 where as file 'b' is healed from +# brick01 to brick02 + +TEST cp -p /etc/passwd $M0/a +TEST cp -p /etc/passwd $M0/b + +#Store mtimes before self-heals +TEST modify_atstamp=$(get_mtime $B0/gfs0/brick02/a) +TEST modify_btstamp=$(get_mtime $B0/gfs0/brick02/b) + +TEST $CLI volume stop $V0 +TEST gf_rm_file_and_gfid_link $B0/gfs0/brick01 a +TEST gf_rm_file_and_gfid_link $B0/gfs0/brick02 b + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 + +TEST $CLI volume set $V0 cluster.self-heal-daemon on +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 + +#TODO remove these 2 lines once heal-full is fixed in v2. +TEST stat $M0/a +TEST stat $M0/b + +TEST gluster volume heal $V0 full +EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick01/a +EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick02/b +EXPECT_WITHIN $HEAL_TIMEOUT 0 afr_get_pending_heal_count $V0 + +size=`stat -c '%s' /etc/passwd` +EXPECT $size stat -c '%s' $B0/gfs0/brick01/a + +TEST modify_atstamp1=$(get_mtime $B0/gfs0/brick01/a) +TEST modify_atstamp2=$(get_mtime $B0/gfs0/brick02/a) +EXPECT $modify_atstamp echo $modify_atstamp1 +EXPECT $modify_atstamp echo $modify_atstamp2 + +TEST modify_btstamp1=$(get_mtime $B0/gfs0/brick01/b) +TEST modify_btstamp2=$(get_mtime $B0/gfs0/brick02/b) +EXPECT $modify_btstamp echo $modify_btstamp1 +EXPECT $modify_btstamp echo $modify_btstamp2 +cleanup; diff --git a/tests/bugs/replicate/bug-921231.t b/tests/bugs/replicate/bug-921231.t new file mode 100644 index 00000000000..93c642beb1e --- /dev/null +++ b/tests/bugs/replicate/bug-921231.t @@ -0,0 +1,31 @@ +#!/bin/bash + +. 
$(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +# This test writes to same file with 2 fds and tests that eager-lock is not +# causing extra delay because of post-op-delay-secs +cleanup; + +function write_to_file { + dd of=$M0/1 if=/dev/zero bs=1024k count=128 oflag=append 2>&1 >/dev/null +} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 +TEST $CLI volume set $V0 eager-lock on +TEST $CLI volume set $V0 post-op-delay-secs 3 +TEST $CLI volume set $V0 client-log-level DEBUG +TEST $CLI volume start $V0 +TEST $CLI volume profile $V0 start +TEST $CLI volume set $V0 ensure-durability off +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +write_to_file & +write_to_file & +wait +#Test if the MAX [F]INODELK fop latency is of the order of seconds. +inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}") +TEST [ -z $inodelk_max_latency ] + +cleanup; diff --git a/tests/bugs/replicate/bug-957877.t b/tests/bugs/replicate/bug-957877.t new file mode 100644 index 00000000000..12901723880 --- /dev/null +++ b/tests/bugs/replicate/bug-957877.t @@ -0,0 +1,33 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../afr.rc +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume start $V0; + +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; +kill_brick $V0 $H0 $B0/${V0}0 +TEST touch $M0/f1 +TEST setfattr -n "user.foo" -v "test" $M0/f1 + +BRICK=$B0"/${V0}1" + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 +TEST $CLI volume heal $V0 + +# Wait for self-heal to complete +EXPECT_WITHIN $HEAL_TIMEOUT '1' count_sh_entries $BRICK; + +TEST getfattr -n "user.foo" $B0/${V0}0/f1; + +TEST $CLI volume stop $V0; +TEST $CLI volume delete $V0; + +cleanup; diff --git a/tests/bugs/replicate/bug-966018.t b/tests/bugs/replicate/bug-966018.t new file mode 100644 index 00000000000..be4d0b97b88 --- /dev/null +++ b/tests/bugs/replicate/bug-966018.t @@ -0,0 +1,35 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../nfs.rc + +#This tests if eager-lock blocks metadata operations on nfs/fuse mounts. +#If it is not woken up, INODELK from the next command waits +#for post-op-delay secs. + +cleanup; +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1 +TEST $CLI volume set $V0 ensure-durability off +TEST $CLI volume set $V0 cluster.eager-lock on +TEST $CLI volume set $V0 cluster.post-op-delay-secs 3 + +TEST $CLI volume start $V0 +TEST $CLI volume profile $V0 start +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 nolock; +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 +echo 1 > $N0/1 && chmod +x $N0/1 +echo 1 > $M0/1 && chmod +x $M0/1 + +#Check that INODELK MAX latency is not in the order of seconds +#Test if the MAX INODELK fop latency is of the order of seconds. +inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. 
-f 1 | egrep "[0-9]{7,}") + +TEST [ -z $inodelk_max_latency ] +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +cleanup; diff --git a/tests/bugs/replicate/bug-976800.t b/tests/bugs/replicate/bug-976800.t new file mode 100644 index 00000000000..35a40a3c72e --- /dev/null +++ b/tests/bugs/replicate/bug-976800.t @@ -0,0 +1,28 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +# This test checks if there are any open fds on the brick +# even after the file is closed on the mount. This particular +# test tests dd with "fsync" to check afr's fsync codepath +cleanup; + +function is_fd_open { + local v=$1 + local h=$2 + local b=$3 + local bpid=$(get_brick_pid $v $h $b) + ls -l /proc/$bpid/fd | grep -w "\-> $b/1" +} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume set $V0 ensure-durability off +TEST $CLI volume set $V0 eager-lock off +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +TEST dd of=$M0/1 if=/dev/zero bs=1k count=1 conv=fsync +TEST ! is_fd_open $V0 $H0 $B0/${V0}0 +cleanup; diff --git a/tests/bugs/replicate/bug-977797.t b/tests/bugs/replicate/bug-977797.t new file mode 100755 index 00000000000..3ff14ecf3d5 --- /dev/null +++ b/tests/bugs/replicate/bug-977797.t @@ -0,0 +1,95 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST $CLI volume set $V0 self-heal-daemon off +TEST $CLI volume set $V0 open-behind off +TEST $CLI volume set $V0 quick-read off +TEST $CLI volume set $V0 read-ahead off +TEST $CLI volume set $V0 write-behind off +TEST $CLI volume set $V0 io-cache off +TEST $CLI volume set $V0 background-self-heal-count 0 + +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + + +TEST mkdir -p $M0/a +TEST `echo "GLUSTERFS" > $M0/a/file` + +TEST kill_brick $V0 $H0 $B0/$V0"1" + +TEST chown root $M0/a +TEST chown root $M0/a/file +TEST `echo "GLUSTER-FILE-SYSTEM" > $M0/a/file` +TEST mkdir $M0/a/b + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0; + + + +TEST kill_brick $V0 $H0 $B0/$V0"2" + +TEST chmod 757 $M0/a +TEST chmod 757 $M0/a/file + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1; + +TEST dd if=$M0/a/file of=/dev/null bs=1024k + +b1c0dir=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a \ + trusted.afr.$V0-client-0 "entry") +b1c1dir=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a \ + trusted.afr.$V0-client-1 "entry") +b2c0dir=$(afr_get_specific_changelog_xattr \ + $B0/$V0"2"/a trusted.afr.$V0-client-0 "entry") +b2c1dir=$(afr_get_specific_changelog_xattr \ + $B0/$V0"2"/a trusted.afr.$V0-client-1 "entry") + + +b1c0f=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a/file \ + trusted.afr.$V0-client-0 "data") +b1c1f=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a/file \ + trusted.afr.$V0-client-1 "data") +b2c0f=$(afr_get_specific_changelog_xattr $B0/$V0"2"/a/file \ + trusted.afr.$V0-client-0 "data") +b2c1f=$(afr_get_specific_changelog_xattr 
$B0/$V0"2"/a/file \ + trusted.afr.$V0-client-1 "data") + +EXPECT "00000000|^$" echo $b1c0f +EXPECT "00000000|^$" echo $b1c1f +EXPECT "00000000|^$" echo $b2c0f +EXPECT "00000000|^$" echo $b2c1f + +EXPECT "00000000|^$" echo $b1c0dir +EXPECT "00000000|^$" echo $b1c1dir +EXPECT "00000000|^$" echo $b2c0dir +EXPECT "00000000|^$" echo $b2c1dir + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/replicate/bug-978794.t b/tests/bugs/replicate/bug-978794.t new file mode 100644 index 00000000000..8e43e74bf79 --- /dev/null +++ b/tests/bugs/replicate/bug-978794.t @@ -0,0 +1,29 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../fileio.rc + + +# This test opens 100 fds and triggers graph switches to check if fsync +# as part of graph-switch causes crash or not. + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 +TEST touch $M0/{1..100} +for i in {1..100}; do fd[$i]=`fd_available`; fd_open ${fd[$i]} 'w' $M0/$i; done +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{2,3} +TEST $CLI volume rebalance $V0 start force +EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 +TEST cat $M0/{1..100} +for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{4,5} +TEST $CLI volume rebalance $V0 start force +EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 +for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done +TEST cat $M0/{1..100} +cleanup diff --git a/tests/bugs/replicate/bug-979365.t b/tests/bugs/replicate/bug-979365.t new file mode 100755 index 00000000000..b1396c23348 --- /dev/null +++ b/tests/bugs/replicate/bug-979365.t @@ -0,0 +1,47 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +#This script checks that ensure-durability option enables/disables afr +#sending fsyncs +cleanup; + +function num_fsyncs { + $CLI volume profile $V0 info | grep -w FSYNC | wc -l +} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume set $V0 ensure-durability on +TEST $CLI volume set $V0 eager-lock off +TEST $CLI volume start $V0 +TEST $CLI volume profile $V0 start +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 +TEST kill_brick $V0 $H0 $B0/${V0}0 +TEST dd of=$M0/a if=/dev/zero bs=1024k count=10 +#fsyncs take a while to complete. +sleep 5 + +# There can be zero or more fsyncs, depending on the order +# in which the writes reached the server, in turn deciding +# whether they were treated as "appending" writes or not. + +TEST [[ $(num_fsyncs) -ge 0 ]] +#Stop the volume to erase the profile info of old operations +TEST $CLI volume profile $V0 stop +TEST $CLI volume stop $V0 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +#Disable ensure-durability now to disable fsyncs in afr. +TEST $CLI volume set $V0 ensure-durability off +TEST $CLI volume start $V0 +TEST kill_brick $V0 $H0 $B0/${V0}0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 +TEST $CLI volume profile $V0 start +TEST dd of=$M0/a if=/dev/zero bs=1024k count=10 +#fsyncs take a while to complete. 
+sleep 5
+TEST [[ $(num_fsyncs) -eq 0 ]]
+
+cleanup
diff --git a/tests/bugs/replicate/bug-986905.t b/tests/bugs/replicate/bug-986905.t
new file mode 100755
index 00000000000..f4f7386ebc4
--- /dev/null
+++ b/tests/bugs/replicate/bug-986905.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This script checks if hardlinks that are created while a brick is down are
+#healed properly.
+
+cleanup;
+function get_inum {
+    ls -i $1 | awk '{print $1}'
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/a
+TEST ln $M0/a $M0/link_a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST ls -l $M0
+inum=$(get_inum $B0/${V0}0/a)
+EXPECT "$inum" get_inum $B0/${V0}0/link_a
+cleanup
diff --git a/tests/bugs/rpc/bug-1043886.t b/tests/bugs/rpc/bug-1043886.t
new file mode 100755
index 00000000000..e6bd45440b9
--- /dev/null
+++ b/tests/bugs/rpc/bug-1043886.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+## Mount volume as NFS export
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+# just a random uid/gid
+uid=22162
+gid=5845
+
+mkdir $N0/other;
+chown $uid:$gid $N0/other;
+
+TEST $CLI volume set $V0 server.root-squash on;
+TEST $CLI volume set $V0 server.anonuid $uid;
+TEST $CLI volume set $V0 server.anongid $gid;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+# create files and directories in the root of the glusterfs and nfs mounts,
+# which is owned by root; the expected behaviour is an EACCES failure
+# because the fops are executed as nfsnobody.
+touch $M0/file 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $M0/dir 2>/dev/null;
+TEST [ $? -ne 0 ]
+
+# Here file and directory creation should succeed, because $N0/other is owned
+# by the same uid/gid that server.anonuid and server.anongid map anonymous access to
+TEST touch $M0/other/file 2>/dev/null;
+TEST [ "$(stat -c %u:%g $N0/other/file)" = "$uid:$gid" ];
+TEST mkdir $M0/other/dir 2>/dev/null;
+TEST [ "$(stat -c %u:%g $N0/other/dir)" = "$uid:$gid" ];
+
+## Unmount NFS before the daemons are killed, to avoid deadlocks
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/rpc/bug-847624.t b/tests/bugs/rpc/bug-847624.t
new file mode 100755
index 00000000000..627c47b335f
--- /dev/null
+++ b/tests/bugs/rpc/bug-847624.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. 
$(dirname $0)/../../volume.rc +cleanup + +#1 +TEST glusterd +TEST pidof glusterd +#3 +TEST $CLI volume create $V0 $H0:$B0/$V0 +TEST $CLI volume set $V0 nfs.drc on +TEST $CLI volume start $V0 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; +TEST mount_nfs $H0:/$V0 $N0 nolock +cd $N0 +#7 +TEST dbench -t 10 10 +TEST rm -rf $N0/* +cd +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 +#10 +TEST $CLI volume set $V0 nfs.drc-size 10000 +cleanup diff --git a/tests/bugs/rpc/bug-884452.t b/tests/bugs/rpc/bug-884452.t new file mode 100644 index 00000000000..c161a68190d --- /dev/null +++ b/tests/bugs/rpc/bug-884452.t @@ -0,0 +1,47 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/$V0 +TEST $CLI volume start $V0 + +TEST glusterfs -s $H0 --volfile-id $V0 $M0 +TEST touch $M0/{1..10000} + +RUN_LS_LOOP_FILE="$M0/run-ls-loop" +function ls-loop +{ + while [ -f $RUN_LS_LOOP_FILE ]; do + ls -lR $M0 1>/dev/null 2>&1 + done; +} + +touch $RUN_LS_LOOP_FILE +ls-loop & + +function vol-status-loop +{ + for i in {1..1000}; do + $CLI volume status $V0 clients >/dev/null 2>&1 + if [ $? -ne 0 ]; then + return 1 + fi + done; + + return 0 +} + +TEST vol-status-loop + +rm -f $RUN_LS_LOOP_FILE +wait + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +cleanup; diff --git a/tests/bugs/rpc/bug-921072.t b/tests/bugs/rpc/bug-921072.t new file mode 100755 index 00000000000..46a3442f180 --- /dev/null +++ b/tests/bugs/rpc/bug-921072.t @@ -0,0 +1,124 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +#1 +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 $H0:$B0/$V0 +TEST $CLI volume start $V0 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available +TEST mount_nfs $H0:/$V0 $N0 nolock +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +# based on ip addresses (1-4) +# case 1: allow only localhost ip +TEST $CLI volume set $V0 nfs.rpc-auth-allow 127.0.0.1 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available + +TEST mount_nfs localhost:/$V0 $N0 nolock +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +# case 2: allow only non-localhost ip +TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.1.1 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available +#11 +TEST ! mount_nfs localhost:/$V0 $N0 nolock +TEST $CLI volume reset --mode=script $V0 +# case 3: reject only localhost ip +TEST $CLI volume set $V0 nfs.rpc-auth-reject 127.0.0.1 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available + +TEST ! mount_nfs localhost:/$V0 $N0 nolock + +# case 4: reject only non-localhost ip +TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.1.1 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available + +TEST mount_nfs localhost:/$V0 $N0 nolock +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + + + +# NEED TO CHECK BOTH IP AND NAME BASED AUTH. 
+# CASES WITH NFS.ADDR-NAMELOOKUP ON (5-12)
+TEST $CLI volume reset --mode=script $V0
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+#20
+TEST mount_nfs localhost:/$V0 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# case 5: allow only localhost
+TEST $CLI volume set $V0 nfs.rpc-auth-allow localhost
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# case 6: allow only somehost
+TEST $CLI volume set $V0 nfs.rpc-auth-allow somehost
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST ! mount_nfs localhost:/$V0 $N0 nolock
+
+# case 7: reject only localhost
+TEST $CLI volume reset --mode=script $V0
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+TEST $CLI volume set $V0 nfs.rpc-auth-reject localhost
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+#30
+TEST ! mount_nfs localhost:/$V0 $N0 nolock
+
+# case 8: reject only somehost
+TEST $CLI volume set $V0 nfs.rpc-auth-reject somehost
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# based on ip addresses: repeat of cases 1-4
+# case 9: allow only localhost ip
+TEST $CLI volume reset --mode=script $V0
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 127.0.0.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0 $N0 nolock
+TEST mkdir -p $N0/subdir
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# case 10: allow a non-localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.1.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+#41
+TEST ! mount_nfs localhost:/$V0 $N0 nolock
+
+# case 11: reject only localhost ip
+TEST $CLI volume reset --mode=script $V0
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 127.0.0.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST ! mount_nfs localhost:/$V0 $N0 nolock
+TEST ! mount_nfs localhost:/$V0/subdir $N0 nolock
+
+# case 12: reject only non-localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.1.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+TEST mount_nfs localhost:/$V0/subdir $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+TEST $CLI volume stop --mode=script $V0
+#52
+TEST $CLI volume delete --mode=script $V0
+cleanup
diff --git a/tests/bugs/rpc/bug-954057.t b/tests/bugs/rpc/bug-954057.t
new file mode 100755
index 00000000000..9c48207b711
--- /dev/null
+++ b/tests/bugs/rpc/bug-954057.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This script checks that server.root-squash works as expected, and that a client mounted with --no-root-squash bypasses it
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST mkdir $M0/nobody
+TEST chown nfsnobody:nfsnobody $M0/nobody
+TEST `echo "file" >> $M0/file`
+TEST cp $M0/file $M0/new
+TEST chmod 700 $M0/new
+TEST cat $M0/new
+
+TEST $CLI volume set $V0 server.root-squash enable
+TEST `echo 3 > /proc/sys/vm/drop_caches`
+TEST ! 
mkdir $M0/other +TEST mkdir $M0/nobody/other +TEST cat $M0/file +TEST ! cat $M0/new +TEST `echo "nobody" >> $M0/nobody/file` + +#mount the client without root-squashing +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 --no-root-squash=yes $M1 +TEST mkdir $M1/m1_dir +TEST `echo "file" >> $M1/m1_file` +TEST cp $M0/file $M1/new +TEST chmod 700 $M1/new +TEST cat $M1/new + +TEST $CLI volume set $V0 server.root-squash disable +TEST mkdir $M0/other +TEST cat $M0/new + +cleanup diff --git a/tests/bugs/snapshot/bug-1045333.t b/tests/bugs/snapshot/bug-1045333.t new file mode 100755 index 00000000000..ad2d0021aaa --- /dev/null +++ b/tests/bugs/snapshot/bug-1045333.t @@ -0,0 +1,44 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../snapshot.rc + +cleanup; +TEST verify_lvm_version; +TEST glusterd; +TEST pidof glusterd; + +TEST setup_lvm 1 + +TEST $CLI volume create $V0 $H0:$L1 +TEST $CLI volume start $V0 + + +S1="${V0}-snap1" #Create snapshot with name contains hyphen(-) +S2="-${V0}-snap2" #Create snapshot with name starts with hyphen(-) +#Create snapshot with a long name +S3="${V0}_single_gluster_volume_is_accessible_by_multiple_clients_offline_snapshot_is_a_long_name" + +TEST $CLI snapshot create $S1 $V0 +TEST snapshot_exists 0 $S1 + +TEST $CLI snapshot create $S2 $V0 +TEST snapshot_exists 0 $S2 + +TEST $CLI snapshot create $S3 $V0 +TEST snapshot_exists 0 $S3 + + +TEST glusterfs -s $H0 --volfile-id=/snaps/$S1/$V0 $M0 +TEST glusterfs -s $H0 --volfile-id=/snaps/$S2/$V0 $M1 +TEST glusterfs -s $H0 --volfile-id=/snaps/$S3/$V0 $M2 + +#Clean up +#TEST $CLI snapshot delete $S1 +#TEST $CLI snapshot delete $S2 +#TEST $CLI snapshot delete $S3 + +TEST $CLI volume stop $V0 force +#TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/snapshot/bug-1049834.t b/tests/bugs/snapshot/bug-1049834.t new file mode 100755 index 00000000000..cdb8a3babf8 --- /dev/null +++ b/tests/bugs/snapshot/bug-1049834.t @@ -0,0 +1,44 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../cluster.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../snapshot.rc + +cleanup; +TEST verify_lvm_version +TEST launch_cluster 2 +TEST setup_lvm 2 + +TEST $CLI_1 peer probe $H2 +EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count + +TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2 +EXPECT 'Created' volinfo_field $V0 'Status' + +TEST $CLI_1 volume start $V0 +EXPECT 'Started' volinfo_field $V0 'Status' + +#Setting the snap-max-hard-limit to 4 +TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 4 +PID_1=$! +wait $PID_1 + +#Creating 3 snapshots on the volume (which is the soft-limit) +TEST create_n_snapshots $V0 3 $V0_snap +TEST snapshot_n_exists $V0 3 $V0_snap + +#Creating the 4th snapshot on the volume and expecting it to be created +# but with the deletion of the oldest snapshot i.e 1st snapshot +TEST $CLI_1 snapshot create ${V0}_snap4 ${V0} +TEST snapshot_exists 1 ${V0}_snap4 +TEST ! snapshot_exists 1 ${V0}_snap1 +TEST $CLI_1 snapshot delete ${V0}_snap4 +TEST $CLI_1 snapshot create ${V0}_snap1 ${V0} +TEST snapshot_exists 1 ${V0}_snap1 + +#Deleting the 4 snaps +#TEST delete_n_snapshots $V0 4 $V0_snap +#TEST ! snapshot_n_exists $V0 4 $V0_snap + +cleanup; diff --git a/tests/bugs/snapshot/bug-1064768.t b/tests/bugs/snapshot/bug-1064768.t new file mode 100644 index 00000000000..53140a0e13e --- /dev/null +++ b/tests/bugs/snapshot/bug-1064768.t @@ -0,0 +1,20 @@ +#!/bin/bash + +. 
$(dirname $0)/../../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1 +TEST $CLI volume start $V0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; + +TEST $CLI volume profile $V0 start +TEST $CLI volume profile $V0 info +TEST $CLI volume profile $V0 stop + +TEST $CLI volume status +TEST $CLI volume stop $V0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Stopped' volinfo_field $V0 'Status'; +cleanup; diff --git a/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t b/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t new file mode 100644 index 00000000000..cf35caad0aa --- /dev/null +++ b/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t @@ -0,0 +1,36 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../snapshot.rc + +cleanup; + +TEST init_n_bricks 2 +TEST setup_lvm 2 +TEST glusterd; + +TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 +TEST $CLI volume start $V0 + +# enable uss and mount the volume +TEST $CLI volume set $V0 features.uss enable +TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0 + +# create 10 snapshots and check if all are being reflected +# in the USS world +gluster snapshot config activate-on-create enable +for i in {1..10}; do $CLI snapshot create snap$i $V0; done +EXPECT 10 uss_count_snap_displayed $M0 + +# snapshots should not be displayed after deactivation +for i in {1..10}; do $CLI snapshot deactivate snap$i --mode=script; done +EXPECT 0 uss_count_snap_displayed $M0 + +# activate all the snapshots and check if all the activated snapshots +# are displayed again +for i in {1..10}; do $CLI snapshot activate snap$i --mode=script; done +EXPECT 10 uss_count_snap_displayed $M0 + +cleanup; + diff --git a/tests/bugs/snapshot/bug-1157991.t b/tests/bugs/snapshot/bug-1157991.t new file mode 100755 index 00000000000..77440bc2301 --- /dev/null +++ b/tests/bugs/snapshot/bug-1157991.t @@ -0,0 +1,30 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../snapshot.rc + +cleanup; +TEST verify_lvm_version; +TEST glusterd; +TEST pidof glusterd; + +TEST setup_lvm 1 + +TEST $CLI volume create $V0 $H0:$L1 +TEST $CLI volume start $V0 + +TEST $CLI snapshot create snap1 $V0 +EXPECT 'Stopped' snapshot_status snap1; + +TEST $CLI snapshot config activate-on-create enable +TEST $CLI snapshot create snap2 $V0 +EXPECT 'Started' snapshot_status snap2; + +#Clean up +TEST $CLI snapshot delete snap1 +TEST $CLI snapshot delete snap2 + +TEST $CLI volume stop $V0 force +TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/snapshot/bug-1162462.t b/tests/bugs/snapshot/bug-1162462.t new file mode 100755 index 00000000000..aed79fdd2c7 --- /dev/null +++ b/tests/bugs/snapshot/bug-1162462.t @@ -0,0 +1,38 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. 
$(dirname $0)/../../snapshot.rc + +cleanup; + +TEST init_n_bricks 3; +TEST setup_lvm 3; +TEST glusterd; +TEST pidof glusterd; + +TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3; +TEST $CLI volume start $V0; +TEST $CLI volume set $V0 features.uss enable; +TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0; + +mkdir $M0/test +echo "file1" > $M0/file1 +ln -s $M0/file1 $M0/test/file_symlink +ls -l $M0/ > /dev/null +ls -l $M0/test/ > /dev/null + +TEST $CLI snapshot create snap1 $V0; +$CLI snapshot activate snap1; +EXPECT 'Started' snapshot_status snap1; + +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0 +ls $M0/.snaps/snap1/test/ > /dev/null +ls -l $M0/.snaps/snap1/test/ > /dev/null +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0 + +TEST $CLI snapshot delete snap1; +TEST $CLI volume stop $V0; +TEST $CLI volume delete $V0; + +cleanup; diff --git a/tests/bugs/snapshot/bug-1162498.t b/tests/bugs/snapshot/bug-1162498.t new file mode 100644 index 00000000000..06b3d74691c --- /dev/null +++ b/tests/bugs/snapshot/bug-1162498.t @@ -0,0 +1,56 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../snapshot.rc + +cleanup; +TEST verify_lvm_version; +TEST glusterd; +TEST pidof glusterd; + +TEST setup_lvm 1 + +TEST $CLI volume create $V0 $H0:$L1 +TEST $CLI volume start $V0 + +TEST $CLI snapshot config activate-on-create enable +TEST $CLI volume set $V0 features.uss enable + +TEST glusterfs -s $H0 --volfile-id=$V0 $M0 + +TEST mkdir $M0/xyz + +TEST $CLI snapshot create snap1 $V0 +TEST $CLI snapshot create snap2 $V0 + +TEST rmdir $M0/xyz + +TEST $CLI snapshot create snap3 $V0 +TEST $CLI snapshot create snap4 $V0 + +TEST mkdir $M0/xyz +TEST ls $M0/xyz/.snaps/ + +TEST $CLI volume stop $V0 +TEST $CLI snapshot restore snap2 +TEST $CLI volume start $V0 + +umount -f $M0 +TEST glusterfs -s $H0 --volfile-id=$V0 $M0 + +#Dir xyz exists in snap1 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/xyz + +TEST ls $M0/xyz/.snaps/ +TEST mkdir $M0/abc +TEST ls $M0/abc/.snaps/ + +#Clean up +TEST $CLI snapshot delete snap1 +TEST $CLI snapshot delete snap3 +TEST $CLI snapshot delete snap4 +TEST $CLI volume stop $V0 force +TEST $CLI volume delete $V0 + +cleanup; + diff --git a/tests/bugs/snapshot/bug-1164613.t b/tests/bugs/snapshot/bug-1164613.t new file mode 100644 index 00000000000..9cf122bc4b8 --- /dev/null +++ b/tests/bugs/snapshot/bug-1164613.t @@ -0,0 +1,34 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../snapshot.rc + +cleanup; +TEST verify_lvm_version; +TEST glusterd; +TEST pidof glusterd; + +TEST setup_lvm 1 + +TEST $CLI volume create $V0 $H0:$L1 +TEST $CLI volume start $V0 +TEST glusterfs -s $H0 --volfile-id=$V0 $M0 + +TEST touch $M0/testfile + +TEST $CLI snapshot create snaps $V0 +TEST $CLI snapshot activate snaps +TEST $CLI volume set $V0 features.uss enable +TEST $CLI volume set $V0 snapshot-directory snaps + +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/snaps/snaps/testfile + +umount -f $M0 + +#Clean up +TEST $CLI snapshot delete snaps +TEST $CLI volume stop $V0 force +TEST $CLI volume delete $V0 + +cleanup; + diff --git a/tests/bugs/snapshot/bug-1166197.t b/tests/bugs/snapshot/bug-1166197.t new file mode 100755 index 00000000000..6592382df6c --- /dev/null +++ b/tests/bugs/snapshot/bug-1166197.t @@ -0,0 +1,48 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../snapshot.rc +. $(dirname $0)/../../volume.rc +. 
$(dirname $0)/../../nfs.rc + +cleanup; +CURDIR=`pwd` + +TEST verify_lvm_version; +TEST glusterd; +TEST pidof glusterd; + +TEST setup_lvm 1 + +TEST $CLI volume create $V0 $H0:$L1 +TEST $CLI volume start $V0 +TEST $CLI snapshot config activate-on-create enable +TEST $CLI volume set $V0 features.uss enable + +EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'; +TEST mount_nfs $H0:/$V0 $N0 nolock +TEST mkdir $N0/testdir + +TEST $CLI snapshot create snap1 $V0 +TEST $CLI snapshot create snap2 $V0 + +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/testdir/.snaps + +TEST cd $N0/testdir +TEST cd .snaps +TEST ls + +TEST $CLI snapshot deactivate snap2 +TEST ls + +TEST cd $CURDIR + +#Clean up +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 +TEST $CLI snapshot delete snap1 +TEST $CLI snapshot delete snap2 +TEST $CLI volume stop $V0 force +TEST $CLI volume delete $V0 + +cleanup; + diff --git a/tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t b/tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t new file mode 100644 index 00000000000..b15d3a85151 --- /dev/null +++ b/tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t @@ -0,0 +1,201 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../nfs.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../snapshot.rc + +# This function returns a value "Y" if user can execute +# the given command. Else it will return "N" +# @arg-1 : Name of the user +# @arg-2 : Path of the file +# @arg-3 : command to be executed +function check_if_permitted () { + local usr=$1 + local path=$2 + local cmd=$3 + local var + local ret + var=$(su - $usr -c "$cmd $path") + ret=$? + + if [ "$cmd" == "cat" ] + then + if [ "$var" == "Test" ] + then + echo "Y" + else + echo "N" + fi + else + if [ "$ret" == "0" ] + then + echo "Y" + else + echo "N" + fi + fi +} + +# Create a directory in /tmp to specify which directory to make +# as home directory for user +home_dir=$(cat /dev/urandom | tr -dc 'a-zA-Z' | fold -w 8 | head -n 1) +home_dir="/tmp/bug-1167580-$home_dir" +mkdir $home_dir + +function get_new_user() { + local temp=$(cat /dev/urandom | tr -dc 'a-zA-Z' | fold -w 8 | head -n 1) + id $temp + if [ "$?" == "0" ] + then + get_new_user + else + echo $temp + fi +} + +function create_user() { + local user=$1 + local group=$2 + + if [ "$group" == "" ] + then + useradd -d $home_dir/$user $user + else + useradd -d $home_dir/$user -G $group $user + fi + + return $? +} + +cleanup; + +TEST setup_lvm 1 +TEST glusterd + +TEST $CLI volume create $V0 $H0:$L1 +TEST $CLI volume start $V0 + +# Mount the volume as both fuse and nfs mount +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available +TEST glusterfs -s $H0 --volfile-id $V0 $M0 +TEST mount_nfs $H0:/$V0 $N0 nolock + +# Create 2 user +user1=$(get_new_user) +create_user $user1 +user2=$(get_new_user) +create_user $user2 + +# create a file for which only user1 has access +echo "Test" > $M0/README +chown $user1 $M0/README +chmod 700 $M0/README + +# enable uss and take a snapshot +TEST $CLI volume set $V0 uss enable +TEST $CLI snapshot config activate-on-create on +TEST $CLI snapshot create snap1 $V0 + +# try to access the file using user1 account. +# It should succeed with both normal mount and snapshot world. 
+# There is time delay in which snapd might not have got the notification +# from glusterd about snapshot create hence using "EXPECT_WITHIN" +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $M0/README cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $N0/README cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $M0/.snaps/snap1/README cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $N0/.snaps/snap1/README cat + + +# try to access the file using user2 account +# It should fail from both normal mount and snapshot world +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $M0/README cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $N0/README cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $M0/.snaps/snap1/README cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $N0/.snaps/snap1/README cat + +# We need to test another scenario where user belonging to one group +# tries to access files from user belonging to another group +# instead of using the already created users and making the test case look complex +# I thought of using two different users. + +# The test case written below does the following things +# 1) Create 2 users (user{3,4}), belonging to 2 different groups (group{3,4}) +# 2) Take a snapshot "snap2" +# 3) Create a file for which only users belonging to group3 have +# permission to read +# 4) Test various combinations of Read-Write, Fuse-NFS mount, User{3,4,5} +# from both normal mount, and USS world. + +echo "Test" > $M0/file3 + +chmod 740 $M0/file3 + +group3=$(get_new_user) +groupadd $group3 + +group4=$(get_new_user) +groupadd $group4 + +user3=$(get_new_user) +create_user $user3 $group3 + +user4=$(get_new_user) +create_user $user4 $group4 + +user5=$(get_new_user) +create_user $user5 + +chgrp $group3 $M0/file3 + +TEST $CLI snapshot create snap2 $V0 + +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $M0/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $M0/.snaps/snap2/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $M0/file3 "echo Hello >" +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $M0/.snaps/snap2/file3 "echo Hello >" +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $N0/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $N0/.snaps/snap2/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $N0/file3 "echo Hello >" +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $N0/.snaps/snap2/file3 "echo Hello >" + + +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/.snaps/snap2/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/file3 "echo Hello >" +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/.snaps/snap2/file3 "echo Hello >" +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/.snaps/snap2/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/file3 "echo Hello >" +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/.snaps/snap2/file3 "echo Hello >" + +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $M0/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 
$M0/.snaps/snap2/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $M0/file3 "echo Hello >" +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $M0/.snaps/snap2/file3 "echo Hello >" +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/.snaps/snap2/file3 cat +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/file3 "echo Hello >" +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/.snaps/snap2/file3 "echo Hello >" + +# cleanup +/usr/sbin/userdel -f -r $user1 +/usr/sbin/userdel -f -r $user2 +/usr/sbin/userdel -f -r $user3 +/usr/sbin/userdel -f -r $user4 +/usr/sbin/userdel -f -r $user5 + +#cleanup all the home directory which is created as part of this test case +if [ -d "$home_dir" ] +then + rm -rf $home_dir +fi + + +groupdel $group3 +groupdel $group4 + +TEST $CLI snapshot delete all + +cleanup; + + diff --git a/tests/bugs/snapshot/bug-1178079.t b/tests/bugs/snapshot/bug-1178079.t new file mode 100644 index 00000000000..a1a6b0b9d49 --- /dev/null +++ b/tests/bugs/snapshot/bug-1178079.t @@ -0,0 +1,24 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +#Create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..2}; +TEST $CLI volume start $V0; + +TEST $CLI volume set $V0 features.uss on; + +TEST glusterfs -s $H0 --volfile-id $V0 $M0; + +TEST touch $M0/file; + +TEST getfattr -d -m . -e hex $M0/file; + +cleanup; diff --git a/tests/bugs/stripe/bug-1002207.t b/tests/bugs/stripe/bug-1002207.t new file mode 100644 index 00000000000..1f8e46bae02 --- /dev/null +++ b/tests/bugs/stripe/bug-1002207.t @@ -0,0 +1,53 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id=$V0 $M0; +TEST dd if=/dev/zero of=$M0/file$i.data bs=1024 count=1024; + +function xattr_query_check() +{ + local path=$1 + local xa_name=$2 + + local ret=$(getfattr -n $xa_name $path 2>&1 | grep -o "$xa_name: No such attribute" | wc -l) + echo $ret +} + +function set_xattr() +{ + local path=$1 + local xa_name=$2 + local xa_val=$3 + + setfattr -n $xa_name -v $xa_val $path + echo $? +} + +EXPECT 0 set_xattr $M0/file$i.data "trusted.name" "testofafairlylongxattrstringthatbutnotlongenoughtofailmemoryallocation" +EXPECT 0 xattr_query_check $M0/file$i.data "trusted.name" + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/stripe/bug-1111454.t b/tests/bugs/stripe/bug-1111454.t new file mode 100644 index 00000000000..05f69345e4b --- /dev/null +++ b/tests/bugs/stripe/bug-1111454.t @@ -0,0 +1,18 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc + +#symlink resolution should succeed +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 stripe 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +TEST mkdir $M0/dir +TEST touch $M0/dir/file +TEST ln -s file $M0/dir/symlinkfile +TEST ls -lR $M0 +cleanup diff --git a/tests/bugs/trace/bug-797171.t b/tests/bugs/trace/bug-797171.t new file mode 100755 index 00000000000..29f96b1be57 --- /dev/null +++ b/tests/bugs/trace/bug-797171.t @@ -0,0 +1,41 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/brick1; +TEST $CLI volume set $V0 debug.trace marker; +TEST $CLI volume set $V0 debug.log-history on + +TEST $CLI volume start $V0; + +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \ +$M0; + +touch $M0/{1..22}; +rm -f $M0/*; + +pid_file=$(ls $GLUSTERD_WORKDIR/vols/$V0/run); +brick_pid=$(cat $GLUSTERD_WORKDIR/vols/$V0/run/$pid_file); + +mkdir $statedumpdir/statedump_tmp/; +echo "path=$statedumpdir/statedump_tmp" > $statedumpdir/glusterdump.options; +echo "all=yes" >> $statedumpdir/glusterdump.options; + +TEST $CLI volume statedump $V0 history; + +file_name=$(ls $statedumpdir/statedump_tmp); +TEST grep "xlator.debug.trace.history" $statedumpdir/statedump_tmp/$file_name; + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +rm -rf $statedumpdir/statedump_tmp; +rm -f $statedumpdir/glusterdump.options; + +cleanup; diff --git a/tests/bugs/transport/bug-873367.t b/tests/bugs/transport/bug-873367.t new file mode 100755 index 00000000000..d4c07024ed0 --- /dev/null +++ b/tests/bugs/transport/bug-873367.t @@ -0,0 +1,45 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +SSL_BASE=/etc/ssl +SSL_KEY=$SSL_BASE/glusterfs.key +SSL_CERT=$SSL_BASE/glusterfs.pem +SSL_CA=$SSL_BASE/glusterfs.ca + +cleanup; +rm -f $SSL_BASE/glusterfs.* +mkdir -p $B0/1 +mkdir -p $M0 + +TEST openssl genrsa -out $SSL_KEY 1024 +TEST openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT +ln $SSL_CERT $SSL_CA + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/1 +TEST $CLI volume set $V0 server.ssl on +TEST $CLI volume set $V0 client.ssl on +TEST $CLI volume set $V0 ssl.certificate-depth 6 +TEST $CLI volume set $V0 ssl.cipher-list HIGH +TEST $CLI volume set $V0 auth.ssl-allow Anyone +TEST $CLI volume start $V0 + +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 +echo some_data > $M0/data_file +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +# If the bug is not fixed, the next mount will fail. + +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 +EXPECT some_data cat $M0/data_file + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/unclassified/bug-1034085.t b/tests/bugs/unclassified/bug-1034085.t new file mode 100644 index 00000000000..aacaa24d642 --- /dev/null +++ b/tests/bugs/unclassified/bug-1034085.t @@ -0,0 +1,31 @@ +#!/bin/bash +#Test case: Check the creation of indices/xattrop dir as soon as brick comes up. + +. 
$(dirname $0)/../../include.rc + +cleanup; + +#Create a volume +TEST glusterd; +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}; +EXPECT 'Created' volinfo_field $V0 'Status'; + +TEST mkdir -p $B0/${V0}-0/.glusterfs/indices/ +TEST touch $B0/${V0}-0/.glusterfs/indices/xattrop + +#Volume start should not work when xattrop dir not created +TEST ! $CLI volume start $V0; + +TEST rm $B0/${V0}-0/.glusterfs/indices/xattrop + +#Volume start should work now +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +#Check for the existence of indices/xattrop dir +TEST [ -d $B0/${V0}-0/.glusterfs/indices/xattrop/ ]; + +cleanup; diff --git a/tests/bugs/unclassified/bug-874498.t b/tests/bugs/unclassified/bug-874498.t new file mode 100644 index 00000000000..8d409d033f7 --- /dev/null +++ b/tests/bugs/unclassified/bug-874498.t @@ -0,0 +1,64 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../afr.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info; +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2; +TEST $CLI volume start $V0; + + +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0; +B0_hiphenated=`echo $B0 | tr '/' '-'` +kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0$B0_hiphenated-brick1.pid` ; + +echo "GLUSTER FILE SYSTEM" > $M0/FILE1 +echo "GLUSTER FILE SYSTEM" > $M0/FILE2 + +FILEN=$B0"/brick2" +XATTROP=$FILEN/.glusterfs/indices/xattrop + +function get_gfid() +{ +path_of_file=$1 + +gfid_value=`getfattr -d -m . $path_of_file -e hex 2>/dev/null | grep trusted.gfid | cut --complement -c -15 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'` + +echo $gfid_value +} + +GFID_ROOT=`get_gfid $B0/brick2` +GFID_FILE1=`get_gfid $B0/brick2/FILE1` +GFID_FILE2=`get_gfid $B0/brick2/FILE2` + + +count=0 +for i in `ls $XATTROP` +do + if [ "$i" == "$GFID_ROOT" ] || [ "$i" == "$GFID_FILE1" ] || [ "$i" == "$GFID_FILE2" ] + then + count=$(( count + 1 )) + fi +done + +EXPECT "3" echo $count + + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 +TEST $CLI volume heal $V0 + + +##Expected number of entries are 0 in the .glusterfs/indices/xattrop directory +EXPECT_WITHIN $HEAL_TIMEOUT '1' count_sh_entries $FILEN; + +TEST $CLI volume stop $V0; +TEST $CLI volume delete $V0; + +cleanup; diff --git a/tests/bugs/unclassified/bug-991622.t b/tests/bugs/unclassified/bug-991622.t new file mode 100644 index 00000000000..17b37a7767d --- /dev/null +++ b/tests/bugs/unclassified/bug-991622.t @@ -0,0 +1,35 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. 
$(dirname $0)/../../fileio.rc
+
+#This tests that no fd leaks are observed in unlink/rename in open-behind
+function leaked_fds {
+    ls -l /proc/$(get_brick_pid $V0 $H0 $B0/$V0)/fd | grep deleted
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 open-behind on
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' "$M0/testfile1"
+TEST fd_write $fd1 "content"
+
+TEST fd2=`fd_available`
+TEST fd_open $fd2 'w' "$M0/testfile2"
+TEST fd_write $fd2 "content"
+
+TEST touch $M0/a
+TEST rm $M0/testfile1
+TEST mv $M0/a $M0/testfile2
+TEST fd_close $fd1
+TEST fd_close $fd2
+TEST ! leaked_fds
+cleanup;
diff --git a/tests/bugs/write-behind/bug-1058663.c b/tests/bugs/write-behind/bug-1058663.c
new file mode 100644
index 00000000000..5e522e98048
--- /dev/null
+++ b/tests/bugs/write-behind/bug-1058663.c
@@ -0,0 +1,119 @@
+#include <fcntl.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#define FILE_SIZE 1048576
+
+/* number of tests to run */
+#define RUN_LOOP 1000
+
+/* number of SIGBUS before exiting */
+#define MAX_SIGBUS 1
+static int expect_sigbus;
+static int sigbus_received;
+
+/* test for truncate()/seek()/write()/mmap()
+ * There should be no SIGBUS triggered.
+ */
+void seek_write(char *filename)
+{
+        int fd;
+        uint8_t *map;
+        int i;
+
+        fd = open(filename, O_RDWR|O_CREAT|O_TRUNC, 0600);
+        lseek(fd, FILE_SIZE - 1, SEEK_SET);
+        write(fd, "\xff", 1);
+
+        map = mmap(NULL, FILE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
+        for (i = 0; i < (FILE_SIZE - 1); i++) {
+                if (map[i] != 0) /* should never be true */
+                        abort();
+        }
+        munmap(map, FILE_SIZE);
+
+        close(fd);
+}
+
+int read_after_eof(char *filename)
+{
+        int ret = 0;
+        int fd;
+        char *data;
+        uint8_t *map;
+
+        fd = open(filename, O_RDWR|O_CREAT|O_TRUNC, 0600);
+        lseek(fd, FILE_SIZE - 1, SEEK_SET);
+        write(fd, "\xff", 1);
+
+        /* verify that reading after EOF fails */
+        ret = read(fd, data, FILE_SIZE / 2);
+        if (ret != 0)
+                return 1;
+
+        /* map an area of 1 byte after FILE_SIZE */
+        map = mmap(NULL, 1, PROT_READ, MAP_PRIVATE, fd, FILE_SIZE);
+        /* map[0] is an access after EOF, it should trigger SIGBUS */
+        if (map[0] != 0)
+                /* it is expected that we exit before we get here */
+                if (!sigbus_received)
+                        return 1;
+        munmap(map, FILE_SIZE);
+
+        close(fd);
+
+        return ret;
+}
+
+/* signal handler for SIGBUS */
+void catch_sigbus(int signum)
+{
+        switch (signum) {
+#ifdef __NetBSD__
+        /* Depending on architecture, we can get SIGSEGV */
+        case SIGSEGV: /* FALLTHROUGH */
+#endif
+        case SIGBUS:
+                sigbus_received++;
+                if (!expect_sigbus)
+                        exit(EXIT_FAILURE);
+                if (sigbus_received >= MAX_SIGBUS)
+                        exit(EXIT_SUCCESS);
+                break;
+        default:
+                printf("Unexpected signal received: %d\n", signum);
+        }
+}
+
+int main(int argc, char **argv)
+{
+        int i = 0;
+
+        if (argc == 1) {
+                printf("Usage: %s <filename>\n", argv[0]);
+                return EXIT_FAILURE;
+        }
+
+#ifdef __NetBSD__
+        /* Depending on architecture, we can get SIGSEGV */
+        signal(SIGSEGV, catch_sigbus);
+#endif
+        signal(SIGBUS, catch_sigbus);
+
+        /* the next test should not trigger SIGBUS */
+        expect_sigbus = 0;
+        for (i = 0; i < RUN_LOOP; i++) {
+                seek_write(argv[1]);
+        }
+
+        /* the next test should trigger SIGBUS */
+        expect_sigbus = 1;
+        if (read_after_eof(argv[1]))
+                return EXIT_FAILURE;
+
+        return EXIT_SUCCESS;
+}
diff --git a/tests/bugs/write-behind/bug-1058663.t b/tests/bugs/write-behind/bug-1058663.t
new file mode 
100644 index 00000000000..a900a6d7afa --- /dev/null +++ b/tests/bugs/write-behind/bug-1058663.t @@ -0,0 +1,28 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/$V0; +TEST $CLI volume start $V0; + +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0 + +# compile the test program and run it +TEST $CC $(dirname $0)/bug-1058663.c -o $(dirname $0)/bug-1058663; +TEST $(dirname $0)/bug-1058663 $M0/bug-1058663.bin; +TEST rm -f $(dirname $0)/M0/bug-1058663.bin; + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +TEST $CLI volume stop $V0; +TEST $CLI volume delete $V0; + +cleanup; -- cgit
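
Any of the relocated tests can be run on its own from the root of a built source tree, for example (a sketch, assuming the TAP-style prove(1) harness that run-tests.sh wraps):

    prove -vf tests/bugs/replicate/bug-913051.t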