 tests/functional/snapshot/test_snap_delete_existing_scheduler.py | 116
 1 file changed, 51 insertions(+), 65 deletions(-)
diff --git a/tests/functional/snapshot/test_snap_delete_existing_scheduler.py b/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
index a321dcd39..af5d86701 100644
--- a/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
+++ b/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
@@ -14,25 +14,15 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-"""
-Description:
-
- Test Cases in this module tests the
- snapshot scheduler behavior when shared volume is mounted/not
- mounted. scheduler command such as initialise scheduler,
- enable scheduler, status of scheduler.
-"""
-import time
+from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.gluster_base_class import (GlusterBaseClass,
- runs_on)
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.io.utils import validate_io_procs
from glustolibs.gluster.volume_ops import get_volume_info
from glustolibs.gluster.shared_storage_ops import (enable_shared_storage,
is_shared_volume_mounted,
- is_shared_volume_unmounted,
disable_shared_storage)
from glustolibs.gluster.snap_scheduler import (scheduler_enable,
scheduler_init,
@@ -84,7 +74,13 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
# Validate shared storage mounted
g.log.info("Starting to validate shared storage mounted")
- ret = is_shared_volume_mounted(self.mnode)
+ count = 0
+ while count < 5:
+ ret = is_shared_volume_mounted(self.mnode)
+ if ret:
+ break
+ sleep(2)
+ count += 1
if not ret:
raise ExecutionError("Failed to mount shared volume")
g.log.info("Successfully mounted shared volume")
@@ -107,27 +103,16 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
"on all nodes")
g.log.info("Successfully disabled snapshot scheduler on all nodes")
- # disable shared storage
- g.log.info("starting to disable shared storage")
- count = 0
- while count < 80:
+ # Check if shared storage is enabled
+ # Disable if true
+ g.log.info("Checking if shared storage is mounted")
+ ret = is_shared_volume_mounted(self.mnode)
+ if ret:
+ g.log.info("Disabling shared storage")
ret = disable_shared_storage(self.mnode)
- if ret:
- break
- time.sleep(2)
- count += 1
-
- if not ret:
- raise ExecutionError("Unexpected: Failed to disable "
- "shared storage")
- g.log.info("Expected: Successfully disabled shared storage")
-
- # Validate shared volume unmounted
- g.log.info("Validate shared volume unmounted")
- ret = is_shared_volume_unmounted(self.mnode)
- if not ret:
- raise ExecutionError("Failed to unmount shared storage")
- g.log.info("Successfully unmounted shared storage")
+ if not ret:
+ raise ExecutionError("Failed to disable shared storage")
+ g.log.info("Successfully disabled shared storage")
# Unmount and cleanup-volume
g.log.info("Starting to Unmount and cleanup-volume")
@@ -139,56 +124,59 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
def test_snap_delete_existing_scheduler(self):
# pylint: disable=too-many-statements
"""
+ Description:
+
+    Validates snapshot scheduler behavior when an existing schedule
+    is deleted.
+
Steps:
- 1. enable shared volume
- 2. create a volume
- 3. initialise snap scheduler on all nodes
- 4. enable snap scheduler
- 5. check snap scheduler status
- 6. perform io on mounts
- 7. schedule a job of creating snapshot
- every 30 mins
- 8. list jobs created
- 9. delete scheduled job
- 10. validate io is successful
- 11. list job should not list
- any existing snapshot jobs
+ * Enable shared volume
+ * Create a volume
+ * Initialise snap_scheduler on all nodes
+ * Enable snap_scheduler
+ * Validate snap_scheduler status
+ * Perform IO on mounts
+ * Schedule a job of creating snapshot every 30 mins
+ * Perform snap_scheduler list
+ * Delete scheduled job
+ * Validate IO is successful
+ * Perform snap_scheduler list
"""
# Initialise snap scheduler
- g.log.info("Initialising snap scheduler on all servers")
+ g.log.info("Initialising snap_scheduler on all servers")
count = 0
while count < 80:
ret = scheduler_init(self.servers)
if ret:
break
- time.sleep(2)
+ sleep(2)
count += 1
self.assertTrue(ret, "Failed to initialise scheduler on all servers")
g.log.info("Successfully initialised scheduler on all servers")
# Enable snap scheduler
- g.log.info("Enabling snap scheduler")
+ g.log.info("Enabling snap_scheduler")
ret, _, _ = scheduler_enable(self.mnode)
self.assertEqual(ret, 0, "Failed to enable scheduler on node %s"
% self.mnode)
g.log.info("Successfully enabled scheduler on node %s", self.mnode)
- # Check snapshot scheduler status
- g.log.info("checking status of snapshot scheduler")
+ # Validate snapshot scheduler status
+ g.log.info("Validating status of snap_scheduler")
for server in self.servers:
count = 0
while count < 40:
ret, status, _ = scheduler_status(server)
if status.strip().split(":")[2] == ' Enabled':
break
- time.sleep(2)
+ sleep(2)
count += 2
self.assertEqual(status.strip().split(":")[2], ' Enabled',
- "Failed to check status of scheduler")
- g.log.info("Successfully checked scheduler status")
+ "Failed to validate status of scheduler")
+ g.log.info("Successfully validated scheduler status")
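
The status loop compares the third ':'-separated field of the snap_scheduler output against ' Enabled'. The same check reads a little more defensively as a helper; a sketch, with the sample output format assumed from the split index used in the test:

def scheduler_is_enabled(status_output):
    # Expects output whose third ':'-separated field carries the
    # state, e.g. "snap_scheduler: Snapshot scheduling status: Enabled"
    # (format assumed, not confirmed by this patch).
    fields = status_output.strip().split(":")
    return len(fields) > 2 and fields[2].strip() == "Enabled"
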
- # write files on all mounts
+ # Write files on all mounts
g.log.info("Starting IO on all mounts...")
all_mounts_procs = []
for mount_obj in self.mounts:
@@ -199,28 +187,26 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
user=mount_obj.user)
all_mounts_procs.append(proc)
- # add a job to schedule snapshot every 30 mins
+ # Add a job to schedule snapshot every 30 mins
g.log.info("Starting to add new job")
self.scheduler = r"*/30 * * * *"
self.job_name = "Job1"
ret, _, _ = scheduler_add_jobs(self.mnode, self.job_name,
- self.scheduler,
- self.volname)
+ self.scheduler, self.volname)
self.assertEqual(ret, 0, "Failed to add job")
g.log.info("Successfully added Job on volume %s", self.volname)
- # scheduler list
+ # Perform snap_scheduler list
g.log.info("Starting to list all scheduler jobs")
ret, _, _ = scheduler_list(self.mnode)
self.assertEqual(ret, 0, "Failed to list scheduler jobs")
g.log.info("Successfully listed all jobs")
- # delete scheduled job
+ # Delete scheduled job
g.log.info("Starting to delete scheduled jobs")
ret, _, _ = scheduler_delete(self.mnode, self.job_name)
self.assertEqual(ret, 0, "Failed to delete scheduled job")
- g.log.info("Successfully deleted scheduled job %s",
- self.job_name)
+ g.log.info("Successfully deleted scheduled job %s", self.job_name)
# Validate IO
self.assertTrue(
@@ -228,11 +214,11 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
"IO failed on some of the clients"
)
- # scheduler list (no active jobs should be there)
+ # Perform snap_scheduler list (no active jobs should be present)
g.log.info("Starting to list all scheduler jobs")
ret, out, _ = scheduler_list(self.mnode)
self.assertEqual(ret, 0, "Failed to list scheduler jobs")
ret1 = out.strip().split(":")
- self.assertEqual(ret1[1], " No snapshots scheduled", "Unexpected:"
- "Failed to delete scheduled job %s" % self.job_name)
+ self.assertEqual(ret1[1], " No snapshots scheduled", "Unexpected: "
+ "Jobs are getting listed even after being deleted")
g.log.info("Expected: No snapshots Jobs scheduled")