Diffstat (limited to 'cli/src/cli-rpc-ops.c')
 -rw-r--r--   cli/src/cli-rpc-ops.c | 1595
 1 files changed, 1376 insertions, 219 deletions
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c index fe3db41b6..bfeb854ad 100644 --- a/cli/src/cli-rpc-ops.c +++ b/cli/src/cli-rpc-ops.c @@ -22,6 +22,8 @@ #define VOL_TOP_PERF_SPEED_WIDTH 4 #define VOL_TOP_PERF_TIME_WIDTH 26 +#define INDENT_MAIN_HEAD "%-25s %s " + #include "cli.h" #include "compat-errno.h" #include "cli-cmd.h" @@ -58,10 +60,6 @@ char *cli_vol_status_str[] = {"Created", "Stopped", }; -char *cli_volume_backend[] = {"", - "Volume Group", -}; - char *cli_vol_task_status_str[] = {"not started", "in progress", "stopped", @@ -285,7 +283,7 @@ gf_cli_output_pool_list (dict_t *dict, int count) else connected_str = "Disconnected"; - cli_out ("%s\t%s\t%s ", uuid_buf, hostname_buf, + cli_out ("%s\t%-9s\t%s ", uuid_buf, hostname_buf, connected_str); i++; } @@ -500,7 +498,11 @@ gf_cli_get_volume_cbk (struct rpc_req *req, struct iovec *iov, char key[1024] = {0}; char err_str[2048] = {0}; gf_cli_rsp rsp = {0}; - int32_t backend = 0; + char *caps = NULL; + int k __attribute__((unused)) = 0; + // snap_volume variable helps in showing whether a volume is a normal + //volume or a volume for the snapshot + int32_t snap_volume = 0; if (-1 == req->rpc_status) goto out; @@ -622,6 +624,11 @@ xml_output: if (ret) goto out; + snprintf (key, sizeof (key), "volume%d.snap_volume", i); + ret = dict_get_int32 (dict, key, &snap_volume); + if (ret) + goto out; + snprintf (key, 256, "volume%d.brick_count", i); ret = dict_get_int32 (dict, key, &brick_count); if (ret) @@ -652,9 +659,6 @@ xml_output: if (ret) goto out; - snprintf (key, 256, "volume%d.backend", i); - ret = dict_get_int32 (dict, key, &backend); - vol_type = type; // Distributed (stripe/replicate/stripe-replica) setups @@ -665,9 +669,44 @@ xml_output: cli_out ("Type: %s", cli_vol_type_str[vol_type]); cli_out ("Volume ID: %s", volume_id_str); cli_out ("Status: %s", cli_vol_status_str[status]); + if (snap_volume) + cli_out ("Snap Volume: %s", "yes"); + else + cli_out ("Snap Volume: %s", "no"); - if (backend) +#ifdef HAVE_BD_XLATOR + k = 0; + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "volume%d.xlator%d", i, k); + ret = dict_get_str (dict, key, &caps); + if (ret) goto next; + do { + j = 0; + cli_out ("Xlator %d: %s", k + 1, caps); + do { + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), + "volume%d.xlator%d.caps%d", + i, k, j++); + ret = dict_get_str (dict, key, &caps); + if (ret) + break; + cli_out ("Capability %d: %s", j, caps); + } while (1); + + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), + "volume%d.xlator%d", i, ++k); + ret = dict_get_str (dict, key, &caps); + if (ret) + break; + } while (1); + +next: +#else + caps = 0; /* Avoid compiler warnings when BD not enabled */ +#endif if (type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) { cli_out ("Number of Bricks: %d x %d x %d = %d", @@ -689,12 +728,6 @@ xml_output: ((transport == 0)?"tcp": (transport == 1)?"rdma": "tcp,rdma")); - -next: - if (backend) { - cli_out ("Backend Type: Block, %s", - cli_volume_backend[backend]); - } j = 1; GF_FREE (local->get_vol.volname); @@ -710,6 +743,12 @@ next: goto out; cli_out ("Brick%d: %s", j, brick); +#ifdef HAVE_BD_XLATOR + snprintf (key, 256, "volume%d.vg%d", i, j); + ret = dict_get_str (dict, key, &caps); + if (!ret) + cli_out ("Brick%d VG: %s", j, caps); +#endif j++; } @@ -1213,7 +1252,7 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov, char msg[1024] = {0,}; gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED; int32_t counter = 0; - char *node_uuid = NULL; + char *node_name = 
NULL; char key[256] = {0,}; int32_t i = 1; uint64_t failures = 0; @@ -1345,11 +1384,11 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov, "-----------", "-----------", "-----------", "-----------", "-----------", "------------", "--------------"); do { - snprintf (key, 256, "node-uuid-%d", i); - ret = dict_get_str (dict, key, &node_uuid); + snprintf (key, 256, "node-name-%d", i); + ret = dict_get_str (dict, key, &node_name); if (ret) gf_log (frame->this->name, GF_LOG_TRACE, - "failed to get node-uuid"); + "failed to get node-name"); memset (key, 0, 256); snprintf (key, 256, "files-%d", i); @@ -1402,7 +1441,7 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov, status = cli_vol_task_status_str[status_rcd]; size_str = gf_uint64_2human_readable(size); cli_out ("%40s %16"PRIu64 " %13s" " %13"PRIu64 " %13"PRIu64 - " %13"PRIu64 " %20s %18.2f", node_uuid, files, + " %13"PRIu64 " %20s %18.2f", node_name, files, size_str, lookup, failures, skipped, status, elapsed); GF_FREE(size_str); @@ -1411,10 +1450,18 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov, done: - if (rsp.op_ret) - cli_err ("volume rebalance: %s: failed: %s", volname, msg); - else - cli_out ("volume rebalance: %s: success: %s", volname, msg); + if (global_state->mode & GLUSTER_MODE_XML) + cli_xml_output_str ("volRebalance", msg, + rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + else { + if (rsp.op_ret) + cli_err ("volume rebalance: %s: failed: %s", volname, + msg); + else + cli_out ("volume rebalance: %s: success: %s", volname, + msg); + } ret = rsp.op_ret; out: @@ -1492,7 +1539,7 @@ gf_cli_reset_volume_cbk (struct rpc_req *req, struct iovec *iov, gf_log ("cli", GF_LOG_INFO, "Received resp to reset"); - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) + if (strcmp (rsp.op_errstr, "")) snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); else snprintf (msg, sizeof (msg), "reset volume %s", @@ -1510,7 +1557,7 @@ gf_cli_reset_volume_cbk (struct rpc_req *req, struct iovec *iov, if (rsp.op_ret) cli_err ("volume reset: failed: %s", msg); else - cli_out ("volume reset: success"); + cli_out ("volume reset: success: %s", msg); ret = rsp.op_ret; @@ -1716,7 +1763,7 @@ gf_cli3_remove_brick_status_cbk (struct rpc_req *req, struct iovec *iov, char key[256] = {0,}; int32_t i = 1; int32_t counter = 0; - char *node_uuid = 0; + char *node_name = 0; gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED; uint64_t failures = 0; uint64_t skipped = 0; @@ -1828,11 +1875,11 @@ xml_output: "-----------","------------", "--------------"); do { - snprintf (key, 256, "node-uuid-%d", i); - ret = dict_get_str (dict, key, &node_uuid); + snprintf (key, 256, "node-name-%d", i); + ret = dict_get_str (dict, key, &node_name); if (ret) gf_log (frame->this->name, GF_LOG_TRACE, - "failed to get node-uuid"); + "failed to get node-name"); memset (key, 0, 256); snprintf (key, 256, "files-%d", i); @@ -1904,7 +1951,7 @@ xml_output: if (strcmp (status, "not started")) { cli_out ("%40s %16"PRIu64 " %13s" " %13"PRIu64 " %13" - PRIu64 " %13"PRIu64 " %14s %16.2f", node_uuid, + PRIu64 " %13"PRIu64 " %14s %16.2f", node_name, files, size_str, lookup, failures, skipped, status, elapsed); } @@ -2924,142 +2971,6 @@ out: return ret; } -#ifdef HAVE_BD_XLATOR -int -gf_cli_bd_op_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf_cli_rsp rsp = {0,}; - int ret = -1; - cli_local_t *local = NULL; - dict_t *dict = NULL; - dict_t *input_dict = NULL; - gf_xl_bd_op_t bd_op = GF_BD_OP_INVALID; - char *operation = NULL; 
- call_frame_t *frame = NULL; - - if (-1 == req->rpc_status) - goto out; - - frame = myframe; - - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log (frame->this->name, GF_LOG_ERROR, - "Failed to decode xdr response"); - goto out; - } - - dict = dict_new (); - if (!dict) { - ret = -1; - goto out; - } - - if (frame) - local = frame->local; - - if (local) { - input_dict = local->dict; - ret = dict_get_int32 (input_dict, "bd-op", - (int32_t *)&bd_op); - } - - switch (bd_op) { - case GF_BD_OP_NEW_BD: - operation = gf_strdup ("create"); - break; - case GF_BD_OP_DELETE_BD: - operation = gf_strdup ("delete"); - break; - case GF_BD_OP_CLONE_BD: - operation = gf_strdup ("clone"); - break; - case GF_BD_OP_SNAPSHOT_BD: - operation = gf_strdup ("snapshot"); - break; - default: - break; - } - - ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); - if (ret) - goto out; - - gf_log ("cli", GF_LOG_INFO, "Received resp to %s bd op", operation); - - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_dict ("BdOp", dict, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } - - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_err ("%s", rsp.op_errstr); - else - cli_out ("BD %s has been %s", operation, - (rsp.op_ret) ? "unsuccessful": - "successful."); - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - - if (dict) - dict_unref (dict); - - if (operation) - GF_FREE (operation); - - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); - if (rsp.op_errstr) - free (rsp.op_errstr); - return ret; -} - -int32_t -gf_cli_bd_op (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf_cli_req req = { {0,} }; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = dict_ref ((dict_t *)data); - if (!dict) - goto out; - - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - &req.dict.dict_len); - - - ret = cli_to_glusterd (&req, frame, gf_cli_bd_op_cbk, - (xdrproc_t)xdr_gf_cli_req, dict, - GLUSTER_CLI_BD_OP, this, cli_rpc_prog, - NULL); - -out: - if (dict) - dict_unref (dict); - - if (req.dict.dict_val) - GF_FREE (req.dict.dict_val); - - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} -#endif - int32_t gf_cli_create_volume (call_frame_t *frame, xlator_t *this, void *data) @@ -6299,43 +6210,57 @@ out: return; } - static void -cli_print_volume_tasks (dict_t *dict) { - int ret = -1; - int tasks = 0; - char *op = 0; - char *task_id_str = NULL; - int status = 0; - char key[1024] = {0,}; - int i = 0; +cli_print_volume_status_tasks (dict_t *dict) +{ + int ret = -1; + int i = 0; + int j = 0; + int count = 0; + int task_count = 0; + int status = 0; + char *op = NULL; + char *task_id_str = NULL; + char *volname = NULL; + char key[1024] = {0,}; + char task[1024] = {0,}; + char *brick = NULL; + char *src_brick = NULL; + char *dest_brick = NULL; - ret = dict_get_int32 (dict, "tasks", &tasks); + ret = dict_get_str (dict, "volname", &volname); + if (ret) + goto out; + + ret = dict_get_int32 (dict, "tasks", &task_count); if (ret) { - gf_log ("cli", GF_LOG_ERROR, - "Failed to get tasks count"); + gf_log ("cli", GF_LOG_ERROR, "Failed to get tasks count"); return; } - if (tasks == 0) { + cli_out ("Task Status of Volume %s", volname); + cli_print_line (CLI_BRICK_STATUS_LINE_LEN); + + if (task_count == 0) { cli_out ("There are no active volume tasks"); + cli_out (" "); return; } - cli_out 
("%15s%40s%15s", "Task", "ID", "Status"); - cli_out ("%15s%40s%15s", "----", "--", "------"); - for (i = 0; i < tasks; i++) { + for (i = 0; i < task_count; i++) { memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "task%d.type", i); ret = dict_get_str(dict, key, &op); if (ret) return; + cli_out ("%-20s : %-20s", "Task", op); memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "task%d.id", i); ret = dict_get_str (dict, key, &task_id_str); if (ret) return; + cli_out ("%-20s : %-20s", "ID", task_id_str); memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "task%d.status", i); @@ -6343,22 +6268,63 @@ cli_print_volume_tasks (dict_t *dict) { if (ret) return; + snprintf (task, sizeof (task), "task%d", i); + /* Replace brick only has two states - In progress and Complete Ref: xlators/mgmt/glusterd/src/glusterd-replace-brick.c */ + if (!strcmp (op, "Replace brick")) { - if (status) { - status = GF_DEFRAG_STATUS_COMPLETE; - } else { - status = GF_DEFRAG_STATUS_STARTED; - } - } + if (status) + status = GF_DEFRAG_STATUS_COMPLETE; + else + status = GF_DEFRAG_STATUS_STARTED; - cli_out ("%15s%40s%15s", op, task_id_str, + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "%s.src-brick", task); + ret = dict_get_str (dict, key, &src_brick); + if (ret) + goto out; + + cli_out ("%-20s : %-20s", "Source Brick", src_brick); + + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "%s.dst-brick", task); + ret = dict_get_str (dict, key, &dest_brick); + if (ret) + goto out; + + cli_out ("%-20s : %-20s", "Destination Brick", + dest_brick); + + } else if (!strcmp (op, "Remove brick")) { + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "%s.count", task); + ret = dict_get_int32 (dict, key, &count); + if (ret) + goto out; + + cli_out ("%-20s", "Removed bricks:"); + + for (j = 1; j <= count; j++) { + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key),"%s.brick%d", + task, j); + ret = dict_get_str (dict, key, &brick); + if (ret) + goto out; + + cli_out ("%-20s", brick); + } + } + cli_out ("%-20s : %-20s", "Status", cli_vol_task_status_str[status]); + cli_out (" "); } +out: + return; } static int @@ -6462,23 +6428,6 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov, if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD)) notbrick = _gf_true; - ret = dict_get_int32 (dict, "count", &count); - if (ret) - goto out; - if (count == 0) { - ret = -1; - goto out; - } - - ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max); - if (ret) - goto out; - ret = dict_get_int32 (dict, "other-count", &other_count); - if (ret) - goto out; - - index_max = brick_index_max + other_count; - if (global_state->mode & GLUSTER_MODE_XML) { if (!local->all) { ret = cli_xml_output_vol_status_begin (local, @@ -6491,11 +6440,21 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov, goto out; } } - ret = cli_xml_output_vol_status (local, dict); - if (ret) { - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; + if (cmd & GF_CLI_STATUS_TASKS) { + ret = cli_xml_output_vol_status_tasks_detail (local, + dict); + if (ret) { + gf_log ("cli", GF_LOG_ERROR,"Error outputting " + "to xml"); + goto out; + } + } else { + ret = cli_xml_output_vol_status (local, dict); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, + "Error outputting to xml"); + goto out; + } } if (!local->all) { @@ -6531,6 +6490,10 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov, cli_print_volume_status_callpool (dict, notbrick); goto cont; break; + case 
GF_CLI_STATUS_TASKS: + cli_print_volume_status_tasks (dict); + goto cont; + break; default: break; } @@ -6539,6 +6502,17 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov, if (ret) goto out; + ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max); + if (ret) + goto out; + + ret = dict_get_int32 (dict, "other-count", &other_count); + if (ret) + goto out; + + index_max = brick_index_max + other_count; + + cli_out ("Status of volume: %s", volname); if ((cmd & GF_CLI_STATUS_DETAIL) == 0) { @@ -6612,7 +6586,7 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov, cli_out (" "); if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) - cli_print_volume_tasks (dict); + cli_print_volume_status_tasks (dict); cont: ret = rsp.op_ret; @@ -6890,6 +6864,97 @@ gf_cli_umount (call_frame_t *frame, xlator_t *this, void *data) } void +cmd_heal_volume_statistics_out (dict_t *dict, int brick) +{ + + uint64_t num_entries = 0; + int ret = 0; + char key[256] = {0}; + char *hostname = NULL; + uint64_t i = 0; + uint64_t healed_count = 0; + uint64_t split_brain_count = 0; + uint64_t heal_failed_count = 0; + char *start_time_str = NULL; + char *end_time_str = NULL; + char *crawl_type = NULL; + int progress = -1; + + snprintf (key, sizeof key, "%d-hostname", brick); + ret = dict_get_str (dict, key, &hostname); + if (ret) + goto out; + cli_out ("------------------------------------------------"); + cli_out ("\nCrawl statistics for brick no %d", brick); + cli_out ("Hostname of brick %s", hostname); + + snprintf (key, sizeof key, "statistics-%d-count", brick); + ret = dict_get_uint64 (dict, key, &num_entries); + if (ret) + goto out; + + for (i = 0; i < num_entries; i++) + { + snprintf (key, sizeof key, "statistics_crawl_type-%d-%"PRIu64, + brick, i); + ret = dict_get_str (dict, key, &crawl_type); + if (ret) + goto out; + + snprintf (key, sizeof key, "statistics_healed_cnt-%d-%"PRIu64, + brick,i); + ret = dict_get_uint64 (dict, key, &healed_count); + if (ret) + goto out; + + snprintf (key, sizeof key, "statistics_sb_cnt-%d-%"PRIu64, + brick, i); + ret = dict_get_uint64 (dict, key, &split_brain_count); + if (ret) + goto out; + snprintf (key, sizeof key, "statistics_heal_failed_cnt-%d-%"PRIu64, + brick, i); + ret = dict_get_uint64 (dict, key, &heal_failed_count); + if (ret) + goto out; + snprintf (key, sizeof key, "statistics_strt_time-%d-%"PRIu64, + brick, i); + ret = dict_get_str (dict, key, &start_time_str); + if (ret) + goto out; + snprintf (key, sizeof key, "statistics_end_time-%d-%"PRIu64, + brick, i); + ret = dict_get_str (dict, key, &end_time_str); + if (ret) + goto out; + snprintf (key, sizeof key, "statistics_inprogress-%d-%"PRIu64, + brick, i); + ret = dict_get_int32 (dict, key, &progress); + if (ret) + goto out; + + cli_out ("\nStarting time of crawl: %s", start_time_str); + if (progress == 1) + cli_out ("Crawl is in progress"); + else + cli_out ("Ending time of crawl: %s", end_time_str); + + cli_out ("Type of crawl: %s", crawl_type); + cli_out ("No. of entries healed: %"PRIu64, + healed_count); + cli_out ("No. of entries in split-brain: %"PRIu64, + split_brain_count); + cli_out ("No. 
of heal failed entries: %"PRIu64, + heal_failed_count); + + } + + +out: + return; +} + +void cmd_heal_volume_brick_out (dict_t *dict, int brick) { uint64_t num_entries = 0; @@ -6955,6 +7020,53 @@ out: return; } + +void +cmd_heal_volume_statistics_heal_count_out (dict_t *dict, int brick) +{ + uint64_t num_entries = 0; + int ret = 0; + char key[256] = {0}; + char *hostname = NULL; + char *path = NULL; + char *status = NULL; + char *shd_status = NULL; + + snprintf (key, sizeof key, "%d-hostname", brick); + ret = dict_get_str (dict, key, &hostname); + if (ret) + goto out; + snprintf (key, sizeof key, "%d-path", brick); + ret = dict_get_str (dict, key, &path); + if (ret) + goto out; + cli_out ("\nBrick %s:%s", hostname, path); + + snprintf (key, sizeof key, "%d-status", brick); + ret = dict_get_str (dict, key, &status); + if (status && strlen (status)) + cli_out ("Status: %s", status); + + snprintf (key, sizeof key, "%d-shd-status",brick); + ret = dict_get_str (dict, key, &shd_status); + + if(!shd_status) + { + snprintf (key, sizeof key, "%d-hardlinks", brick); + ret = dict_get_uint64 (dict, key, &num_entries); + if (ret) + cli_out ("No gathered input for this brick"); + else + cli_out ("Number of entries: %"PRIu64, num_entries); + + + } + +out: + return; +} + + int gf_cli_heal_volume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) @@ -7033,6 +7145,15 @@ gf_cli_heal_volume_cbk (struct rpc_req *req, struct iovec *iov, case GF_AFR_OP_SPLIT_BRAIN_FILES: heal_op_str = "list of split brain entries"; break; + case GF_AFR_OP_STATISTICS: + heal_op_str = "crawl statistics"; + break; + case GF_AFR_OP_STATISTICS_HEAL_COUNT: + heal_op_str = "count of entries to be healed"; + break; + case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA: + heal_op_str = "count of entries to be healed per replica"; + break; case GF_AFR_OP_INVALID: heal_op_str = "invalid heal op"; break; @@ -7093,8 +7214,28 @@ gf_cli_heal_volume_cbk (struct rpc_req *req, struct iovec *iov, goto out; } - for (i = 0; i < brick_count; i++) - cmd_heal_volume_brick_out (dict, i); + switch (heal_op) { + case GF_AFR_OP_STATISTICS: + for (i = 0; i < brick_count; i++) + cmd_heal_volume_statistics_out (dict, i); + break; + case GF_AFR_OP_STATISTICS_HEAL_COUNT: + case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA: + for (i = 0; i < brick_count; i++) + cmd_heal_volume_statistics_heal_count_out (dict, + i); + break; + case GF_AFR_OP_INDEX_SUMMARY: + case GF_AFR_OP_HEALED_FILES: + case GF_AFR_OP_HEAL_FAILED_FILES: + case GF_AFR_OP_SPLIT_BRAIN_FILES: + for (i = 0; i < brick_count; i++) + cmd_heal_volume_brick_out (dict, i); + break; + default: + break; + } + ret = rsp.op_ret; out: @@ -7391,6 +7532,1024 @@ out: return ret; } +int32_t +cli_snapshot_remove_reply (gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame) +{ + int32_t ret = -1; + char *snap_name = NULL; + + GF_ASSERT (rsp); + GF_ASSERT (dict); + GF_ASSERT (frame); + + if (rsp->op_ret) { + cli_err("snapshot delete: failed: %s", + rsp->op_errstr ? 
rsp->op_errstr : + "Please check log file for details"); + ret = rsp->op_ret; + goto out; + } + + ret = dict_get_str (dict, "snapname", &snap_name); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to get snapname"); + goto out; + } + + cli_out ("snapshot delete: %s: snap removed successfully", + snap_name); + ret = 0; + +out: + return ret; +} + +int +cli_snapshot_config_display (dict_t *dict, gf_cli_rsp *rsp) +{ + char buf[PATH_MAX] = ""; + char *volname = NULL; + int ret = -1; + int config_command = 0; + uint64_t value = 0; + uint64_t hard_limit = 0; + uint64_t soft_limit = 0; + uint64_t i = 0; + uint64_t voldisplaycount = 0; + + GF_ASSERT (dict); + GF_ASSERT (rsp); + + if (rsp->op_ret) { + cli_err ("Snapshot Config : failed: %s", + rsp->op_errstr ? rsp->op_errstr : + "Please check log file for details"); + ret = rsp->op_ret; + goto out; + } + + ret = dict_get_int32 (dict, "config-command", &config_command); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch config type"); + goto out; + } + + ret = dict_get_str (dict, "volname", &volname); + /* Ignore the error, as volname is optional */ + + if (!volname) { + volname = "System"; + } + + ret = dict_get_uint64 (dict, "snap-max-hard-limit", &hard_limit); + /* Ignore the error, as the key specified is optional */ + ret = dict_get_uint64 (dict, "snap-max-soft-limit", &soft_limit); + + if (!hard_limit && !soft_limit + && config_command != GF_SNAP_CONFIG_DISPLAY) { + ret = -1; + gf_log(THIS->name, GF_LOG_ERROR, + "Could not fetch config-key"); + goto out; + } + + switch (config_command) { + case GF_SNAP_CONFIG_TYPE_SET: + if (hard_limit && soft_limit) { + cli_out ("snapshot config: snap-max-hard-limit " + "& snap-max-soft-limit for system set " + "successfully"); + } else if (hard_limit){ + cli_out ("snapshot config: %s " + "for snap-max-hard-limit set successfully", + volname); + } else if (soft_limit) { + cli_out ("snapshot config: %s " + "for snap-max-soft-limit set successfully", + volname); + } + break; + + case GF_SNAP_CONFIG_DISPLAY : + cli_out ("\nSnapshot System Configuration:"); + ret = dict_get_uint64 (dict, "snap-max-hard-limit", + &value); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch " + "snap_max_hard_limit for %s", volname); + ret = -1; + goto out; + } + cli_out ("snap-max-hard-limit : %"PRIu64, value); + + ret = dict_get_uint64 (dict, "snap-max-soft-limit", + &soft_limit); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch " + "snap-max-soft-limit for %s", volname); + ret = -1; + goto out; + } + cli_out ("snap-max-soft-limit : %"PRIu64"%%\n", + soft_limit); + + cli_out ("Snapshot Volume Configuration:"); + + ret = dict_get_uint64 (dict, "voldisplaycount", + &voldisplaycount); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, + "Could not fetch voldisplaycount"); + ret = -1; + goto out; + } + + for (i = 0; i < voldisplaycount; i++) { + snprintf (buf, sizeof(buf), "volume%ld-volname", i); + ret = dict_get_str (dict, buf, &volname); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch " + " %s", buf); + ret = -1; + goto out; + } + cli_out ("\nVolume : %s", volname); + + snprintf (buf, sizeof(buf), + "volume%ld-snap-max-hard-limit", i); + ret = dict_get_uint64 (dict, buf, &value); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch " + " %s", buf); + ret = -1; + goto out; + } + cli_out ("snap-max-hard-limit : %"PRIu64, value); + + snprintf (buf, sizeof(buf), + "volume%ld-active-hard-limit", i); + ret = dict_get_uint64 (dict, buf, &value); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, 
"Could not fetch" + " effective snap_max_hard_limit for " + "%s", volname); + ret = -1; + goto out; + } + cli_out ("Effective snap-max-hard-limit : %"PRIu64, + value); + + snprintf (buf, sizeof(buf), + "volume%ld-snap-max-soft-limit", i); + ret = dict_get_uint64 (dict, buf, &value); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch " + " %s", buf); + ret = -1; + goto out; + } + cli_out ("Effective snap-max-soft-limit : %"PRIu64" " + "(%"PRIu64"%%)", value, soft_limit); + } + break; + default : + break; + } + + ret = 0; +out: + return ret; +} + +/* This function is used to print the volume related information + * of a snap. + * + * arg - 0, dict : Response Dictionary. + * arg - 1, prefix str : snaplist.snap{0..}.vol{0..}.* + */ +int +cli_get_each_volinfo_in_snap (dict_t *dict, char *keyprefix, + gf_boolean_t snap_driven) { + char key[PATH_MAX] = ""; + char *get_buffer = NULL; + int value = 0; + int ret = -1; + char indent[5] = "\t"; + char *volname = NULL; + + GF_ASSERT (dict); + GF_ASSERT (keyprefix); + + if (snap_driven) { + ret = snprintf (key, sizeof (key), "%s.volname", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_str (dict, key, &get_buffer); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to get %s", key); + goto out; + } + cli_out ("%s" INDENT_MAIN_HEAD "%s", indent, + "Snap Volume Name", ":", get_buffer); + + ret = snprintf (key, sizeof (key), + "%s.origin-volname", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_str (dict, key, &volname); + if (ret) { + gf_log ("cli", GF_LOG_WARNING, "Failed to get %s", key); + cli_out ("%-12s", "Origin:"); + } + cli_out ("%s" INDENT_MAIN_HEAD "%s", indent, + "Origin Volume name", ":", volname); + + + ret = snprintf (key, sizeof (key), "%s.snapcount", + keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_int32 (dict, key, &value); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to get %s", key); + goto out; + } + cli_out ("%s%s %s %s %d", indent, "Snaps taken for", + volname, ":", value); + + ret = snprintf (key, sizeof (key), "%s.snaps-available", + keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_int32 (dict, key, &value); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to get %s", key); + goto out; + } + cli_out ("%s%s %s %s %d", indent, "Snaps available for", + volname, ":", value); + } + + + ret = snprintf (key, sizeof (key), "%s.vol-status", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_str (dict, key, &get_buffer); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to get %s", key); + goto out; + } + cli_out ("%s" INDENT_MAIN_HEAD "%s", indent, "Status", + ":", get_buffer); +out : + return ret; +} + +/* This function is used to print snap related information + * arg - 0, dict : Response dictionary. 
+ * arg - 1, prefix_str : snaplist.snap{0..}.* + */ +int +cli_get_volinfo_in_snap (dict_t *dict, char *keyprefix) { + + char key[PATH_MAX] = ""; + int i = 0; + int volcount = 0; + int ret = -1; + + GF_ASSERT (dict); + GF_ASSERT (keyprefix); + + ret = snprintf (key, sizeof (key), "%s.vol-count", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_int32 (dict, key, &volcount); + for (i = 1 ; i <= volcount ; i++) { + ret = snprintf (key, sizeof (key), + "%s.vol%d", keyprefix, i); + if (ret < 0) { + goto out; + } + ret = cli_get_each_volinfo_in_snap (dict, key, _gf_true); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not list " + "details of volume in a snap"); + goto out; + } + cli_out (" "); + } + +out : + return ret; +} + +int +cli_get_each_snap_info (dict_t *dict, char *prefix_str, + gf_boolean_t snap_driven) { + char key_buffer[PATH_MAX] = ""; + char *get_buffer = NULL; + int ret = -1; + char indent[5] = ""; + + GF_ASSERT (dict); + GF_ASSERT (prefix_str); + + if (!snap_driven) + strcat (indent, "\t"); + + ret = snprintf (key_buffer, sizeof (key_buffer), "%s.snapname", + prefix_str); + if (ret < 0 ) { + goto out; + } + + ret = dict_get_str (dict, key_buffer, &get_buffer); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Unable to fetch snapname %s ", + key_buffer); + goto out; + } + cli_out ("%s" INDENT_MAIN_HEAD "%s", indent, "Snapshot", + ":", get_buffer); + + ret = snprintf (key_buffer, sizeof (key_buffer), "%s.snap-id", + prefix_str); + if (ret < 0 ) { + goto out; + } + + ret = dict_get_str (dict, key_buffer, &get_buffer); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Unable to fetch snap-id %s ", + key_buffer); + goto out; + } + cli_out ("%s" INDENT_MAIN_HEAD "%s", indent, "Snap UUID", + ":", get_buffer); + + ret = snprintf (key_buffer, sizeof (key_buffer), "%s.snap-desc", + prefix_str); + if (ret < 0 ) { + goto out; + } + + ret = dict_get_str (dict, key_buffer, &get_buffer); + if (!ret) { + /* Ignore error for description */ + cli_out ("%s" INDENT_MAIN_HEAD "%s", indent, + "Description", ":", get_buffer); + } + + ret = snprintf (key_buffer, sizeof (key_buffer), "%s.snap-time", + prefix_str); + if (ret < 0 ) { + goto out; + } + + ret = dict_get_str (dict, key_buffer, &get_buffer); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Unable to fetch snap-time %s ", + prefix_str); + goto out; + } + cli_out ("%s" INDENT_MAIN_HEAD "%s", indent, "Created", + ":", get_buffer); + + if (snap_driven) { + cli_out ("%-12s", "Snap Volumes:\n"); + ret = cli_get_volinfo_in_snap (dict, prefix_str); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Unable to list details " + "of the snaps"); + goto out; + } + } +out : + return ret; +} + +/* This is a generic function to print snap related information. 
+ * arg - 0, dict : Response Dictionary + */ +int +cli_call_snapshot_info (dict_t *dict, gf_boolean_t bool_snap_driven) { + int snap_count = 0; + char key[PATH_MAX] = ""; + int ret = -1; + int i = 0; + + GF_ASSERT (dict); + + ret = dict_get_int32 (dict, "snap-count", &snap_count); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Unable to get snap-count"); + goto out; + } + + if (snap_count == 0) { + cli_out ("No snapshots present"); + } + + for (i = 1 ; i <= snap_count ; i++) { + ret = snprintf (key, sizeof (key), "snap%d", i); + if (ret < 0) { + goto out; + } + ret = cli_get_each_snap_info (dict, key, bool_snap_driven); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, + "Unable to print snap details"); + goto out; + } + } +out : + return ret; +} + +int +cli_get_snaps_in_volume (dict_t *dict) { + int ret = -1; + int i = 0; + int count = 0; + int avail = 0; + char key[PATH_MAX] = ""; + char *get_buffer = NULL; + + GF_ASSERT (dict); + + ret = dict_get_str (dict, "origin-volname", &get_buffer); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch origin-volname"); + goto out; + } + cli_out (INDENT_MAIN_HEAD "%s", "Volume Name", ":", get_buffer); + + ret = dict_get_int32 (dict, "snap-count", &avail); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch snap-count"); + goto out; + } + cli_out (INDENT_MAIN_HEAD "%d", "Snaps Taken", ":", avail); + + ret = dict_get_int32 (dict, "snaps-available", &count); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch snaps-available"); + goto out; + } + cli_out (INDENT_MAIN_HEAD "%d", "Snaps Available", ":", count); + + for (i = 1 ; i <= avail ; i++) { + snprintf (key, sizeof (key), "snap%d", i); + ret = cli_get_each_snap_info (dict, key, _gf_false); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, + "Unable to print snap details"); + goto out; + } + + ret = snprintf (key, sizeof (key), "snap%d.vol1", i); + if (ret < 0) { + goto out; + } + ret = cli_get_each_volinfo_in_snap (dict, key, _gf_false); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not get volume " + "related information"); + goto out; + } + + cli_out (" "); + } +out : + return ret; +} + +int +cli_snapshot_list (dict_t *dict) { + int snapcount = 0; + char key[PATH_MAX] = ""; + int ret = -1; + int i = 0; + char *get_buffer = NULL; + + GF_ASSERT (dict); + + ret = dict_get_int32 (dict, "snap-count", &snapcount); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch snap count"); + goto out; + } + + if (snapcount == 0) { + cli_out ("No snapshots present"); + } + + for (i = 1 ; i <= snapcount ; i++) { + ret = snprintf (key, sizeof (key), "snapname%d",i); + if (ret < 0) { + goto out; + } + + ret = dict_get_str (dict, key, &get_buffer); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not get %s ", key); + goto out; + } else { + cli_out ("%s", get_buffer); + } + } +out : + return ret; +} + +int +cli_get_snap_volume_status (dict_t *dict, char *key_prefix) +{ + int ret = -1; + char key[PATH_MAX] = ""; + char *buffer = NULL; + int brickcount = 0; + int i = 0; + int pid = 0; + + GF_ASSERT (dict); + GF_ASSERT (key_prefix); + + ret = snprintf (key, sizeof (key), "%s.brickcount", key_prefix); + if (ret < 0) { + goto out; + } + ret = dict_get_int32 (dict, key, &brickcount); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to fetch brickcount"); + goto out; + } + + for ( i = 0 ; i < brickcount ; i++ ) { + ret = snprintf (key, sizeof (key), "%s.brick%d.path", + key_prefix, i); + if (ret < 0) { + goto out; + } + + ret = dict_get_str (dict, key, &buffer); + if (ret) { + gf_log ("cli", 
GF_LOG_INFO, + "Unable to get Brick Path"); + continue; + } + cli_out ("\n\t%-17s %s %s", "Brick Path", ":", buffer); + + ret = snprintf (key, sizeof (key), "%s.brick%d.vgname", + key_prefix, i); + if (ret < 0) { + goto out; + } + + ret = dict_get_str (dict, key, &buffer); + if (ret) { + gf_log ("cli", GF_LOG_INFO, + "Unable to get Volume Group"); + cli_out ("\t%-17s %s %s", "Volume Group", ":", "N/A"); + } else + cli_out ("\t%-17s %s %s", "Volume Group", ":", buffer); + + ret = snprintf (key, sizeof (key), "%s.brick%d.status", + key_prefix, i); + if (ret < 0) { + goto out; + } + + ret = dict_get_str (dict, key, &buffer); + if (ret) { + gf_log ("cli", GF_LOG_INFO, + "Unable to get Brick Running"); + cli_out ("\t%-17s %s %s", "Brick Running", ":", "N/A"); + } else + cli_out ("\t%-17s %s %s", "Brick Running", ":", buffer); + + ret = snprintf (key, sizeof (key), "%s.brick%d.pid", + key_prefix, i); + if (ret < 0) { + goto out; + } + + ret = dict_get_int32 (dict, key, &pid); + if (ret) { + gf_log ("cli", GF_LOG_INFO, + "Unable to get pid"); + cli_out ("\t%-17s %s %s", "Brick PID", ":", "N/A"); + } else + cli_out ("\t%-17s %s %d", "Brick PID", ":", pid); + + ret = snprintf (key, sizeof (key), "%s.brick%d.data", + key_prefix, i); + if (ret < 0) { + goto out; + } + + ret = dict_get_str (dict, key, &buffer); + if (ret) { + gf_log ("cli", GF_LOG_INFO, + "Unable to get Data Percent"); + cli_out ("\t%-17s %s %s", "Data Percentage", ":", "N/A"); + } else + cli_out ("\t%-17s %s %s", "Data Percentage", ":", buffer); + + ret = snprintf (key, sizeof (key), "%s.brick%d.lvsize", + key_prefix, i); + if (ret < 0) { + goto out; + } + ret = dict_get_str (dict, key, &buffer); + if (ret) { + gf_log ("cli", GF_LOG_INFO, "Unable to get LV Size"); + cli_out ("\t%-17s %s %s", "LV Size", ":", "N/A"); + } else + cli_out ("\t%-17s %s %s", "LV Size", ":", buffer); + + } +out : + return ret; +} + + + +int +cli_get_single_snap_status (dict_t *dict, char *keyprefix) +{ + int ret = -1; + char key[PATH_MAX] = ""; + int i = 0; + int volcount = 0; + char *get_buffer = NULL; + + GF_ASSERT (dict); + GF_ASSERT (keyprefix); + + ret = snprintf (key, sizeof (key), "%s.snapname", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_str (dict, key, &get_buffer); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Unable to get snapname"); + goto out; + } + cli_out ("\nSnap Name : %s", get_buffer); + + ret = snprintf (key, sizeof (key), "%s.uuid", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_str (dict, key, &get_buffer); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Unable to get snap UUID"); + goto out; + } + cli_out ("Snap UUID : %s", get_buffer); + + ret = snprintf (key, sizeof (key), "%s.volcount", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_int32 (dict, key, &volcount); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Unable to get volume count"); + goto out; + } + + for (i = 0 ; i < volcount ; i++) { + ret = snprintf (key, sizeof (key), "%s.vol%d", keyprefix, i); + if (ret < 0) { + goto out; + } + + ret = cli_get_snap_volume_status (dict, key); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, + "Could not get snap volume status"); + goto out; + } + } +out : + return ret; +} + +int +cli_snap_status_all (dict_t *dict) { + int ret = -1; + char key[PATH_MAX] = ""; + int snapcount = 0; + int i = 0; + + GF_ASSERT (dict); + + ret = dict_get_int32 (dict, "status.snapcount", &snapcount); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not get snapcount"); + goto out; + } + + if (snapcount == 0) { + 
cli_out ("No snapshots present"); + } + + for (i = 0 ; i < snapcount; i++) { + ret = snprintf (key, sizeof (key), "status.snap%d",i); + if (ret < 0) { + goto out; + } + ret = cli_get_single_snap_status (dict, key); + } +out: + return ret; +} + + +int +cli_snapshot_status_display (dict_t *dict, gf_cli_rsp *rsp) +{ + char key[PATH_MAX] = ""; + int ret = -1; + int status_cmd = -1; + + GF_ASSERT (dict); + GF_ASSERT (rsp); + + if (rsp->op_ret) { + cli_err ("Snapshot Status : failed: %s", + rsp->op_errstr ? rsp->op_errstr : + "Please check log file for details"); + ret = rsp->op_ret; + goto out; + } + + ret = dict_get_int32 (dict, "cmd", &status_cmd); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch status type"); + goto out; + } + switch (status_cmd) { + case GF_SNAP_STATUS_TYPE_ALL : + { + ret = cli_snap_status_all (dict); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch " + "status of all snap"); + goto out; + } + break; + } + + case GF_SNAP_STATUS_TYPE_SNAP : + { + ret = snprintf (key, sizeof (key), "status.snap0"); + if (ret < 0) { + goto out; + } + ret = cli_get_single_snap_status (dict, key); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch " + "status of snap"); + goto out; + } + break; + } + + case GF_SNAP_STATUS_TYPE_VOL : + { + ret = cli_snap_status_all (dict); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch " + "status of snap in a volume"); + goto out; + } + break; + } + default : + break; + } +out : + return ret; +} + +int +gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + int ret = -1; + gf_cli_rsp rsp = {0, }; + dict_t *dict = NULL; + char *snap_name = NULL; + int32_t type = 0; + call_frame_t *frame = NULL; + gf_boolean_t snap_driven = _gf_false; + + if (req->rpc_status == -1) { + ret = -1; + goto out; + } + + frame = myframe; + + ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log (frame->this->name, GF_LOG_ERROR, + "Failed to decode xdr response"); + goto out; + } + + dict = dict_new (); + + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); + + if (ret) + goto out; + + ret = dict_get_int32 (dict, "type", &type); + if (ret) { + gf_log (frame->this->name, GF_LOG_ERROR, "failed to get type"); + goto out; + } + + switch (type) { + case GF_SNAP_OPTION_TYPE_CREATE: + if (rsp.op_ret) { + cli_err("snapshot create: failed: %s", + rsp.op_errstr ? rsp.op_errstr : + "Please check log file for details"); + ret = rsp.op_ret; + goto out; + } + + ret = dict_get_str (dict, "snapname", &snap_name); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, + "Failed to get snap name"); + goto out; + } + cli_out ("snapshot create: %s: snap created successfully", + snap_name); + break; + + case GF_SNAP_OPTION_TYPE_RESTORE: + /* TODO: Check if rsp.op_ret needs to be checked here. Or is + * it ok to check this in the start of the function where we + * get rsp.*/ + if (rsp.op_ret) { + cli_err("snapshot restore: failed: %s", + rsp.op_errstr ? rsp.op_errstr : + "Please check log file for details"); + ret = rsp.op_ret; + goto out; + } + + ret = dict_get_str (dict, "snapname", &snap_name); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, + "Failed to get snap name"); + goto out; + } + + cli_out ("Snapshot restore: %s: Snap restored " + "successfully", snap_name); + + ret = 0; + break; + + case GF_SNAP_OPTION_TYPE_INFO: + if (rsp.op_ret) { + cli_err ("Snapshot info : failed: %s", + rsp.op_errstr ? 
rsp.op_errstr : + "Please check log file for details"); + ret = rsp.op_ret; + goto out; + } + + snap_driven = dict_get_str_boolean (dict, "snap-driven", + _gf_false); + if (snap_driven == _gf_true) { + ret = cli_call_snapshot_info (dict, snap_driven); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, + "Snapshot info failed"); + goto out; + } + } else if (snap_driven == _gf_false) { + ret = cli_get_snaps_in_volume (dict); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, + "Snapshot info failed"); + goto out; + } + } + break; + + case GF_SNAP_OPTION_TYPE_CONFIG: + ret = cli_snapshot_config_display (dict, &rsp); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to display " + "snapshot config output."); + goto out; + } + break; + + case GF_SNAP_OPTION_TYPE_LIST: + if (rsp.op_ret) { + cli_err ("Snapshot list : failed: %s", + rsp.op_errstr ? rsp.op_errstr : + "Please check log file for details"); + ret = rsp.op_ret; + goto out; + } + + ret = cli_snapshot_list (dict); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to display " + "snapshot list"); + goto out; + } + break; + + case GF_SNAP_OPTION_TYPE_DELETE: + ret = cli_snapshot_remove_reply (&rsp, dict, frame); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, + "Failed to delete snap"); + goto out; + } + break; + + case GF_SNAP_OPTION_TYPE_STATUS: + ret = cli_snapshot_status_display (dict, &rsp); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to display " + "snapshot status output."); + goto out; + } + break; + + default: + cli_err ("Unknown command executed"); + ret = -1; + goto out; + } +out: + if (dict) + dict_unref (dict); + cli_cmd_broadcast_response (ret); + + free (rsp.dict.dict_val); + free (rsp.op_errstr); + + return ret; +} + +int32_t +gf_cli_snapshot (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf_cli_req req = {{0,}}; + dict_t *options = NULL; + int ret = -1; + + if (!frame || !this || !data) + goto out; + + options = data; + + ret = cli_to_glusterd (&req, frame, gf_cli_snapshot_cbk, + (xdrproc_t) xdr_gf_cli_req, options, + GLUSTER_CLI_SNAP, this, cli_rpc_prog, + NULL); +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + GF_FREE (req.dict.dict_val); + return ret; +} + int cli_to_glusterd (gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn, xdrproc_t xdrproc, dict_t *dict, @@ -7502,9 +8661,7 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = { [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", gf_cli_clearlocks_volume}, [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", gf_cli_copy_file}, [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", gf_cli_sys_exec}, -#ifdef HAVE_BD_XLATOR - [GLUSTER_CLI_BD_OP] = {"BD_OP", gf_cli_bd_op}, -#endif + [GLUSTER_CLI_SNAP] = {"SNAP", gf_cli_snapshot}, }; struct rpc_clnt_program cli_prog = { |
