author     Avra Sengupta <asengupt@redhat.com>    2016-08-26 14:05:07 +0530
committer  Rajesh Joseph <rjoseph@redhat.com>     2016-09-01 00:33:42 -0700
commit     c1278de9a5fb6a64455f42b8b17a8d05b74c2420 (patch)
tree       0009628ce94e746e22959065049804104e4d8b54 /xlators
parent     ee0d8ca53f685f8f27c93b3d7c808f2a78c1ae43 (diff)
snapshot/eventsapi: Integrate snapshot events with eventsapi
1. EVENT_SNAPSHOT_CREATED : snapshot_name=snap1 volume_name=test_vol
                            snapshot_uuid=26dd6c52-6021-40b1-a507-001a80401d70
2. EVENT_SNAPSHOT_CREATE_FAILED : snapshot_name=snap1 volume_name=test_vol
                                  error=Snapshot snap1 already exists
3. EVENT_SNAPSHOT_ACTIVATED : snapshot_name=snap1
                              snapshot_uuid=26dd6c52-6021-40b1-a507-001a80401d70
4. EVENT_SNAPSHOT_ACTIVATE_FAILED: snapshot_name=snap1
                                   error=Snapshot snap1 is already activated.
5. EVENT_SNAPSHOT_DEACTIVATED : snapshot_name=snap1
                              snapshot_uuid=26dd6c52-6021-40b1-a507-001a80401d70
6. EVENT_SNAPSHOT_DEACTIVATE_FAILED : snapshot_name=snap3
                                      error=Snapshot (snap3) does not exist.
7. EVENT_SNAPSHOT_SOFT_LIMIT_REACHED : volume_name=test_vol
                                  volume_id=2ace2616-5591-4b9b-be2a-38592dda5758
8. EVENT_SNAPSHOT_HARD_LIMIT_REACHED : volume_name=test_vol
                                  volume_id=2ace2616-5591-4b9b-be2a-38592dda5758
9. EVENT_SNAPSHOT_RESTORED : snapshot_name=snap1 volume_name=test_vol
                             snapshot_uuid=3a840ec5-08da-4f2b-850d-1d5539a5d14d
10. EVENT_SNAPSHOT_RESTORE_FAILED : snapshot_name=snap10
                                    error=Snapshot (snap10) does not exist
11. EVENT_SNAPSHOT_DELETED : snapshot_name=snap1
                             snapshot_uuid=d9ff3d4f-f579-4345-a4da-4f9353f0950c
12. EVENT_SNAPSHOT_DELETE_FAILED : snapshot_name=snap2
                                   error=Snapshot (snap2) does not exist
13. EVENT_SNAPSHOT_CLONED : clone_uuid=93ba9f06-cb9c-4ace-aa52-2616e7f31022
                            snapshot_name=snap1 clone_name=clone2
14. EVENT_SNAPSHOT_CLONE_FAILED : snapshot_name=snap1 clone_name=clone2
                                  error=Volume with name:clone2 already exists
15. EVENT_SNAPSHOT_CONFIG_UPDATED : auto-delete=enable config_type=system_config
                                    config_type=volume_config hard_limit=100
16. EVENT_SNAPSHOT_CONFIG_UPDATE_FAILED :
                   error=Invalid snap-max-soft-limit 110. Expected range 1 - 100
17. EVENT_SNAPSHOT_SCHEDULER_INITIALISED : status=Success
18. EVENT_SNAPSHOT_SCHEDULER_INIT_FAILED
19. EVENT_SNAPSHOT_SCHEDULER_ENABLED : status=Successfully Enabled
20. EVENT_SNAPSHOT_SCHEDULER_ENABLE_FAILED :
                                   error=Snapshot scheduler is already enabled.
21. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADDED : status=Successfully added job job1
22. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED :
                    status=Failed to add job job1 error=The job already exists.
23. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDITED :
                                             status=Successfully edited job job1
24. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED :
                                                 status=Failed to edit job job2
                                                 error=The job cannot be found.
25. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED :
                                            status=Successfully deleted job job1
26. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED :
                                               status=Failed to delete job job1
                                               error=The job cannot be found.
27. EVENT_SNAPSHOT_SCHEDULER_DISABLED : status=Successfully Disabled
28. EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED :
                                   error=Snapshot scheduler is already disabled.
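
All of these events are raised only from the originator glusterd, using the
gf_event() helper that this patch pulls in via "events.h"; the payload is a
single string of semicolon-separated key=value pairs, as the samples above
show. A minimal sketch of the emission pattern, mirroring the delete path in
the diff below (the helper name snap_emit_delete_result is hypothetical, and
the glusterd types are assumed to be in scope, as they are inside
glusterd-snapshot.c):

    #include "events.h"            /* gf_event() and the EVENT_* ids */

    /* Sketch only: mirrors the pattern in glusterd_handle_snap_limit(). */
    static void
    snap_emit_delete_result (glusterd_snap_t *snap, int ret, dict_t *dict)
    {
            char msg[PATH_MAX] = {0, };

            /* Build the key=value payload first ... */
            snprintf (msg, sizeof (msg), "snapshot_name=%s;snapshot_uuid=%s",
                      snap->snapname, uuid_utoa (snap->snap_id));

            /* ... then raise the event from the originator node only, so a
             * cluster-wide operation produces exactly one notification. */
            if (is_origin_glusterd (dict) == _gf_true) {
                    if (ret)
                            gf_event (EVENT_SNAPSHOT_DELETE_FAILED, "%s", msg);
                    else
                            gf_event (EVENT_SNAPSHOT_DELETED, "%s", msg);
            }
    }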
Change-Id: I3479cc3fb7af3c76ded67cf289f99547d0a55d21
BUG: 1370567
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: http://review.gluster.org/15329
Tested-by: Aravinda VK <avishwan@redhat.com>
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Rajesh Joseph <rjoseph@redhat.com>
Diffstat (limited to 'xlators')
 -rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c |   1
 -rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c       | 105
2 files changed, 99 insertions, 7 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index 836184726a2..3f9e51e50cf 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -4042,6 +4042,7 @@ glusterd_is_snap_soft_limit_reached (glusterd_volinfo_t *volinfo, dict_t *dict)
                                 "set soft limit exceed flag in "
                                 "response dictionary");
                 }
+
                 goto out;
         }
         ret = 0;
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 5a9ba003578..4908b527114 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -61,6 +61,7 @@
 
 #include "xdr-generic.h"
 #include "lvm-defaults.h"
+#include "events.h"
 
 char snap_mount_dir[PATH_MAX];
 struct snap_create_args_ {
@@ -7946,6 +7947,7 @@ glusterd_handle_snap_limit (dict_t *dict, dict_t *rsp_dict)
         int                 i                   = 0;
         char               *volname             = NULL;
         char                key[PATH_MAX]       = {0, };
+        char                msg[PATH_MAX]       = {0, };
         glusterd_volinfo_t *volinfo             = NULL;
         uint64_t            limit               = 0;
         int64_t             count               = 0;
@@ -8020,6 +8022,10 @@ glusterd_handle_snap_limit (dict_t *dict, dict_t *rsp_dict)
                         "Deleting snapshot %s.", limit, volinfo->volname,
                         snap->snapname);
 
+                snprintf (msg, sizeof(msg), "snapshot_name=%s;"
+                          "snapshot_uuid=%s", snap->snapname,
+                          uuid_utoa(snap->snap_id));
+
                 LOCK (&snap->lock);
                 {
                         snap->snap_status = GD_SNAP_STATUS_DECOMMISSION;
@@ -8042,6 +8048,13 @@ glusterd_handle_snap_limit (dict_t *dict, dict_t *rsp_dict)
                                         snap->snapname);
                 }
         unlock: UNLOCK (&snap->lock);
+                if (is_origin_glusterd (dict) == _gf_true) {
+                        if (ret)
+                                gf_event (EVENT_SNAPSHOT_DELETE_FAILED,
+                                          "%s", msg);
+                        else
+                                gf_event (EVENT_SNAPSHOT_DELETED, "%s", msg);
+                }
         }
 
 out:
@@ -8127,13 +8140,20 @@ int32_t
 glusterd_snapshot_create_postvalidate (dict_t *dict, int32_t op_ret,
                                        char **op_errstr, dict_t *rsp_dict)
 {
-        xlator_t        *this           = NULL;
-        glusterd_conf_t *priv           = NULL;
-        int              ret            = -1;
-        int32_t          cleanup        = 0;
-        glusterd_snap_t *snap           = NULL;
-        char            *snapname       = NULL;
-        char            *auto_delete    = NULL;
+        xlator_t             *this                = NULL;
+        glusterd_conf_t      *priv                = NULL;
+        int                   ret                 = -1;
+        int32_t               cleanup             = 0;
+        glusterd_snap_t      *snap                = NULL;
+        char                 *snapname            = NULL;
+        char                 *auto_delete         = NULL;
+        char                 *volname             = NULL;
+        glusterd_volinfo_t   *volinfo             = NULL;
+        uint64_t              opt_hard_max        = GLUSTERD_SNAPS_MAX_HARD_LIMIT;
+        uint64_t              opt_max_soft        = GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT;
+        int64_t               effective_max_limit = 0;
+        int64_t               soft_limit          = 0;
+        int32_t               snap_activate       = _gf_false;
 
         this = THIS;
 
@@ -8200,6 +8220,77 @@ glusterd_snapshot_create_postvalidate (dict_t *dict, int32_t op_ret,
                 goto out;
         }
 
+        /*
+         * If activate_on_create was enabled, and we have reached this  *
+         * section of the code, that means, that after successfully     *
+         * creating the snapshot, we have also successfully started the *
+         * snapshot bricks on all nodes. So from originator node we can *
+         * send EVENT_SNAPSHOT_ACTIVATED event.                         *
+         *                                                              *
+         * Also check, if hard limit and soft limit is reached in case  *
+         * of successfuly creating the snapshot, and generate the event *
+         */
+        if (is_origin_glusterd (dict) == _gf_true) {
+                snap_activate = dict_get_str_boolean (priv->opts,
+                                              GLUSTERD_STORE_KEY_SNAP_ACTIVATE,
+                                              _gf_false);
+
+                if (snap_activate == _gf_true) {
+                        gf_event (EVENT_SNAPSHOT_ACTIVATED, "snapshot_name=%s;"
+                                  "snapshot_uuid=%s", snap->snapname,
+                                  uuid_utoa(snap->snap_id));
+                }
+
+                ret = dict_get_str (dict, "volname1", &volname);
+                if (ret) {
+                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                GD_MSG_DICT_GET_FAILED,
+                                "Failed to get volname.");
+                        goto out;
+                }
+
+                ret = glusterd_volinfo_find (volname, &volinfo);
+                if (ret) {
+                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                GD_MSG_VOL_NOT_FOUND,
+                                "Failed to get volinfo.");
+                        goto out;
+                }
+
+                /* config values snap-max-hard-limit and snap-max-soft-limit are
+                 * optional and hence we are not erroring out if values are not
+                 * present
+                 */
+                gd_get_snap_conf_values_if_present (priv->opts, &opt_hard_max,
+                                                    &opt_max_soft);
+
+                if (volinfo->snap_max_hard_limit < opt_hard_max)
+                        effective_max_limit = volinfo->snap_max_hard_limit;
+                else
+                        effective_max_limit = opt_hard_max;
+
+                /*
+                 * Check for hard limit. If it is reached after taking *
+                 * this snapshot, then generate event for the same. If *
+                 * it is not reached, then check for the soft limit,   *
+                 * and generate event accordingly.                     *
+                 */
+                if (volinfo->snap_count >= effective_max_limit) {
+                        gf_event (EVENT_SNAPSHOT_HARD_LIMIT_REACHED,
+                                  "volume_name=%s;volume_id=%s",
+                                  volname,
+                                  uuid_utoa(volinfo->volume_id));
+                } else {
+                        soft_limit = (opt_max_soft * effective_max_limit)/100;
+                        if (volinfo->snap_count >= soft_limit) {
+                                gf_event (EVENT_SNAPSHOT_SOFT_LIMIT_REACHED,
+                                          "volume_name=%s;volume_id=%s",
+                                          volname,
+                                          uuid_utoa(volinfo->volume_id));
+                        }
+                }
+        }
+
         /* "auto-delete" might not be set by user explicitly,
          * in that case it's better to consider the default value.
          * Hence not erroring out if Key is not found.
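
To make the threshold arithmetic in glusterd_snapshot_create_postvalidate()
concrete, here is a standalone, compilable sketch of the same computation.
The numbers are illustrative: the cluster-wide defaults assumed here (hard
limit 256, soft limit 90%) stand in for GLUSTERD_SNAPS_MAX_HARD_LIMIT and
GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT, but any configured values follow the
same path.

    #include <stdio.h>
    #include <stdint.h>

    /* Standalone sketch of the hard/soft limit checks added above.
     * With a per-volume hard limit of 100 and a soft limit of 90%,
     * the 95th snapshot raises the soft-limit event and the 100th
     * would raise the hard-limit event. */
    int
    main (void)
    {
            uint64_t opt_hard_max        = 256; /* cluster snap-max-hard-limit */
            uint64_t opt_max_soft        = 90;  /* snap-max-soft-limit (percent) */
            uint64_t vol_hard_limit      = 100; /* per-volume hard limit */
            uint64_t snap_count          = 95;  /* snapshots after this create */
            int64_t  effective_max_limit = 0;
            int64_t  soft_limit          = 0;

            /* The effective ceiling is the lower of the per-volume and
             * cluster-wide hard limits. */
            if (vol_hard_limit < opt_hard_max)
                    effective_max_limit = vol_hard_limit;
            else
                    effective_max_limit = opt_hard_max;

            /* Soft limit is a percentage of that ceiling: 90% of 100 -> 90. */
            soft_limit = (opt_max_soft * effective_max_limit) / 100;

            if (snap_count >= (uint64_t)effective_max_limit)
                    printf ("would raise EVENT_SNAPSHOT_HARD_LIMIT_REACHED\n");
            else if (snap_count >= (uint64_t)soft_limit)
                    printf ("would raise EVENT_SNAPSHOT_SOFT_LIMIT_REACHED\n");

            return 0;
    }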
