-rw-r--r--  cli/src/Makefile.am                                    4
-rw-r--r--  cli/src/cli-cmd-parser.c                             129
-rw-r--r--  cli/src/cli-cmd-volume.c                             354
-rw-r--r--  cli/src/cli-cmd.c                                      4
-rw-r--r--  cli/src/cli-cmd.h                                      2
-rw-r--r--  cli/src/cli-quotad-client.c                          154
-rw-r--r--  cli/src/cli-quotad-client.h                           33
-rw-r--r--  cli/src/cli-rpc-ops.c                                509
-rw-r--r--  cli/src/cli.c                                         48
-rw-r--r--  cli/src/cli.h                                          9
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c                       5
-rw-r--r--  libglusterfs/src/common-utils.c                       48
-rw-r--r--  libglusterfs/src/common-utils.h                        4
-rw-r--r--  rpc/xdr/src/cli1-xdr.h                                 5
-rw-r--r--  rpc/xdr/src/cli1-xdr.x                                31
-rw-r--r--  tests/bugs/bug-848251.t                                7
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c          34
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c            77
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-quota.c          1182
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rebalance.c         2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-replace-brick.c     2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c          11
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.c           162
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.h            10
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.c           83
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c           997
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h            51
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.c          262
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.h            2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c       56
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-set.c       68
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c                  19
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h                 102
33 files changed, 3685 insertions, 781 deletions
diff --git a/cli/src/Makefile.am b/cli/src/Makefile.am
index d5189da5e8c..6370c2203bb 100644
--- a/cli/src/Makefile.am
+++ b/cli/src/Makefile.am
@@ -2,7 +2,7 @@ sbin_PROGRAMS = gluster
gluster_SOURCES = cli.c registry.c input.c cli-cmd.c cli-rl.c \
cli-cmd-volume.c cli-cmd-peer.c cli-rpc-ops.c cli-cmd-parser.c\
- cli-cmd-system.c cli-cmd-misc.c cli-xml-output.c
+ cli-cmd-system.c cli-cmd-misc.c cli-xml-output.c cli-quotad-client.c
gluster_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(GF_LDADD)\
$(RLLIBS) $(top_builddir)/rpc/xdr/src/libgfxdr.la \
@@ -10,7 +10,7 @@ gluster_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(GF_LDADD)\
$(GF_GLUSTERFS_LIBS) $(XML_LIBS)
gluster_LDFLAGS = $(GF_LDFLAGS)
-noinst_HEADERS = cli.h cli-mem-types.h cli-cmd.h
+noinst_HEADERS = cli.h cli-mem-types.h cli-cmd.h cli-quotad-client.h
AM_CPPFLAGS = $(GF_CPPFLAGS) \
-I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/rpc-lib/src\
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index cd0370acc78..dd7b11bccd4 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -485,8 +485,12 @@ cli_cmd_quota_parse (const char **words, int wordcount, dict_t **options)
uint64_t value = 0;
gf_quota_type type = GF_QUOTA_OPTION_TYPE_NONE;
char *opwords[] = { "enable", "disable", "limit-usage",
- "remove", "list", "version", NULL };
+ "remove", "list", "alert-time",
+ "soft-timeout", "hard-timeout",
+ "default-soft-limit", NULL};
char *w = NULL;
+ uint32_t time = 0;
+ double percent = 0;
GF_ASSERT (words);
GF_ASSERT (options);
@@ -558,7 +562,8 @@ cli_cmd_quota_parse (const char **words, int wordcount, dict_t **options)
}
if (strcmp (w, "limit-usage") == 0) {
- if (wordcount != 6) {
+
+ if (wordcount < 6 || wordcount > 7) {
ret = -1;
goto out;
}
@@ -567,8 +572,8 @@ cli_cmd_quota_parse (const char **words, int wordcount, dict_t **options)
if (words[4][0] != '/') {
cli_err ("Please enter absolute path");
-
- return -2;
+ ret = -1;
+ goto out;
}
ret = dict_set_str (dict, "path", (char *) words[4]);
if (ret)
@@ -576,20 +581,34 @@ cli_cmd_quota_parse (const char **words, int wordcount, dict_t **options)
if (!words[5]) {
cli_err ("Please enter the limit value to be set");
-
- return -2;
+ ret = -1;
+ goto out;
}
ret = gf_string2bytesize (words[5], &value);
if (ret != 0) {
cli_err ("Please enter a correct value");
- return -1;
+ goto out;
}
- ret = dict_set_str (dict, "limit", (char *) words[5]);
+ ret = dict_set_str (dict, "hard-limit", (char *) words[5]);
if (ret < 0)
goto out;
+ if (wordcount == 7) {
+
+ ret = gf_string2percent (words[6], &percent);
+ if (ret != 0) {
+ cli_err ("Please enter a correct value");
+ goto out;
+ }
+
+ ret = dict_set_str (dict, "soft-limit",
+ (char *) words[6]);
+ if (ret < 0)
+ goto out;
+ }
+
goto set_type;
}
if (strcmp (w, "remove") == 0) {
@@ -602,8 +621,8 @@ cli_cmd_quota_parse (const char **words, int wordcount, dict_t **options)
if (words[4][0] != '/') {
cli_err ("Please enter absolute path");
-
- return -2;
+ ret = -1;
+ goto out;
}
ret = dict_set_str (dict, "path", (char *) words[4]);
@@ -636,8 +655,75 @@ cli_cmd_quota_parse (const char **words, int wordcount, dict_t **options)
goto set_type;
}
- if (strcmp (w, "version") == 0) {
- type = GF_QUOTA_OPTION_TYPE_VERSION;
+
+ if (strcmp (w, "alert-time") == 0) {
+ if (wordcount != 5) {
+ ret = -1;
+ goto out;
+ }
+ type = GF_QUOTA_OPTION_TYPE_ALERT_TIME;
+
+ ret = gf_string2time (words[4], &time);
+ if (ret) {
+ cli_err ("Invalid argument %s. Please enter a valid "
+ "string", words[4]);
+ goto out;
+ }
+
+ ret = dict_set_str (dict, "value", (char *)words[4]);
+ if (ret < 0)
+ goto out;
+ goto set_type;
+ }
+ if (strcmp (w, "soft-timeout") == 0) {
+ if (wordcount != 5) {
+ ret = -1;
+ goto out;
+ }
+ type = GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT;
+
+ ret = gf_string2time (words[4], &time);
+ if (ret) {
+ cli_err ("Invalid argument %s. Please enter a valid "
+ "string", words[4]);
+ goto out;
+ }
+
+ ret = dict_set_str (dict, "value", (char *)words[4]);
+ if (ret < 0)
+ goto out;
+ goto set_type;
+ }
+ if (strcmp (w, "hard-timeout") == 0) {
+ if(wordcount != 5) {
+ ret = -1;
+ goto out;
+ }
+ type = GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT;
+
+ ret = gf_string2time (words[4], &time);
+ if (ret) {
+ cli_err ("Invalid argument %s. Please enter a valid "
+ "string", words[4]);
+ goto out;
+ }
+
+ ret = dict_set_str (dict, "value", (char *)words[4]);
+ if (ret < 0)
+ goto out;
+ goto set_type;
+ }
+ if (strcmp (w, "default-soft-limit") == 0) {
+ if(wordcount != 5) {
+ ret = -1;
+ goto out;
+ }
+ type = GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT;
+
+ ret = dict_set_str (dict, "value", (char *)words[4]);
+ if (ret < 0)
+ goto out;
+ goto set_type;
} else {
GF_ASSERT (!"opword mismatch");
}
@@ -2225,6 +2311,8 @@ cli_cmd_volume_status_parse (const char **words, int wordcount,
cmd |= GF_CLI_STATUS_NFS;
} else if (!strcmp (words[3], "shd")) {
cmd |= GF_CLI_STATUS_SHD;
+ } else if (!strcmp (words[3], "quotad")) {
+ cmd |= GF_CLI_STATUS_QUOTAD;
} else {
cmd = GF_CLI_STATUS_BRICK;
ret = dict_set_str (dict, "brick",
@@ -2280,6 +2368,17 @@ cli_cmd_volume_status_parse (const char **words, int wordcount,
goto out;
}
cmd |= GF_CLI_STATUS_SHD;
+ } else if (!strcmp (words[3], "quotad")) {
+ if (cmd == GF_CLI_STATUS_FD ||
+ cmd == GF_CLI_STATUS_CLIENTS ||
+ cmd == GF_CLI_STATUS_DETAIL ||
+ cmd == GF_CLI_STATUS_INODE) {
+ cli_err ("Detail/FD/Clients/Inode status not "
+ "available for Quota Daemon");
+ ret = -1;
+ goto out;
+ }
+ cmd |= GF_CLI_STATUS_QUOTAD;
} else {
if (cmd == GF_CLI_STATUS_TASKS) {
cli_err ("Tasks status not available for "
@@ -2317,7 +2416,7 @@ cli_cmd_validate_dumpoption (const char *arg, char **option)
{
char *opwords[] = {"all", "nfs", "mem", "iobuf", "callpool", "priv",
"fd", "inode", "history", "inodectx", "fdctx",
- NULL};
+ "quotad", NULL};
char *w = NULL;
w = str_getunamb (arg, opwords);
@@ -2349,6 +2448,10 @@ cli_cmd_volume_statedump_options_parse (const char **words, int wordcount,
strncat (option_str, option, strlen (option));
strncat (option_str, " ", 1);
}
+ if((strstr (option_str, "nfs")) && strstr (option_str, "quotad")) {
+ ret = -1;
+ goto out;
+ }
dict = dict_new ();
if (!dict)
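Note on the parser changes above: the optional seventh word to "limit-usage" is validated as a percentage (via gf_string2percent()) before being stored under the "soft-limit" key, and the alert-time/soft-timeout/hard-timeout arguments go through gf_string2time() in the same way. As a rough illustration of that kind of percentage check, here is a small standalone sketch; it is not the tree's gf_string2percent() implementation, and its 0-100 range check is an assumption of the sketch only:

/* Hypothetical stand-in for the percentage validation used by the
 * limit-usage branch: accept "80" or "80%", reject anything else. */
#include <stdio.h>
#include <stdlib.h>

static int
parse_percent (const char *str, double *out)
{
        char   *end = NULL;
        double  val = 0.0;

        if (!str || !out)
                return -1;

        val = strtod (str, &end);
        if (end == str)                 /* no digits at all */
                return -1;
        if (*end == '%')                /* optional trailing '%' */
                end++;
        if (*end != '\0')               /* trailing junk */
                return -1;
        if (val < 0.0 || val > 100.0)   /* sketch-only range check */
                return -1;

        *out = val;
        return 0;
}

int
main (void)
{
        double p = 0.0;

        printf ("%d %g\n", parse_percent ("80%", &p), p);   /* 0 80 */
        printf ("%d\n", parse_percent ("abc", &p));         /* -1 */
        return 0;
}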
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index 100be0b7337..9bc11d2dbb4 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -30,8 +30,10 @@
#include "run.h"
extern struct rpc_clnt *global_rpc;
+extern struct rpc_clnt *global_quotad_rpc;
extern rpc_clnt_prog_t *cli_rpc_prog;
+extern rpc_clnt_prog_t cli_quotad_clnt;
int
cli_cmd_volume_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word,
@@ -1007,6 +1009,283 @@ out:
return ret;
}
+static int
+gf_cli_create_auxiliary_mount (char *volname)
+{
+ int ret = -1;
+ char mountdir[PATH_MAX] = {0,};
+ char pidfile_path[PATH_MAX] = {0,};
+ char logfile[PATH_MAX] = {0,};
+
+ GLUSTERFS_GET_AUX_MOUNT_PIDFILE (pidfile_path, volname);
+
+ if (gf_is_service_running (pidfile_path, NULL)) {
+ gf_log ("cli", GF_LOG_DEBUG, "Aux mount of volume %s is running"
+ " already", volname);
+ ret = 0;
+ goto out;
+ }
+
+ snprintf (mountdir, sizeof (mountdir)-1, "/tmp/%s", volname);
+ ret = mkdir (mountdir, 0777);
+ if (ret && errno != EEXIST) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to create auxiliary mount "
+ "directory %s. Reason : %s", mountdir,
+ strerror (errno));
+ goto out;
+ }
+
+ snprintf (logfile, PATH_MAX-1, "%s/quota-mount-%s.log",
+ DEFAULT_LOG_FILE_DIRECTORY, volname);
+
+ ret = runcmd (SBIN_DIR"/glusterfs",
+ "-s", "localhost",
+ "--volfile-id", volname,
+ "-l", logfile,
+ "-p", pidfile_path,
+ mountdir,
+ "--client-pid", "-42", NULL);
+
+ if (ret) {
+ gf_log ("cli", GF_LOG_WARNING, "failed to mount glusterfs "
+ "client. Please check the log file %s for more details",
+ logfile);
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int
+cli_stage_quota_op (char *volname, int op_code)
+{
+ int ret = -1;
+
+ switch (op_code) {
+ case GF_QUOTA_OPTION_TYPE_ENABLE:
+ case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE:
+ case GF_QUOTA_OPTION_TYPE_REMOVE:
+ case GF_QUOTA_OPTION_TYPE_LIST:
+ ret = gf_cli_create_auxiliary_mount (volname);
+ if (ret)
+ goto out;
+ ret = 0;
+ break;
+
+ default:
+ ret = 0;
+ break;
+ }
+
+out:
+ return ret;
+}
+
+static void
+print_quota_list_header (void)
+{
+ //Header
+ cli_out (" Path Hard-limit "
+ "Soft-limit Used Available");
+ cli_out ("-----------------------------------------------------"
+ "---------------------------");
+}
+
+int
+cli_get_soft_limit (dict_t *options, const char **words, dict_t *xdata)
+{
+ call_frame_t *frame = NULL;
+ cli_local_t *local = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ char *default_sl = NULL;
+ char *default_sl_dup = NULL;
+ int ret = -1;
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
+ //We need a ref on @options to prevent CLI_STACK_DESTROY
+ //from destroying it prematurely.
+ dict_ref (options);
+ CLI_LOCAL_INIT (local, words, frame, options);
+ proc = &cli_rpc_prog->proctable[GLUSTER_CLI_QUOTA];
+ ret = proc->fn (frame, THIS, options);
+
+ ret = dict_get_str (options, "default-soft-limit", &default_sl);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get default soft limit");
+ goto out;
+ }
+
+ default_sl_dup = gf_strdup (default_sl);
+ if (!default_sl_dup) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (xdata, "default-soft-limit", default_sl_dup);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to set default soft limit");
+ GF_FREE (default_sl_dup);
+ goto out;
+ }
+
+out:
+ CLI_STACK_DESTROY (frame);
+ return ret;
+}
+
+#define QUOTA_CONF_HEADER \
+ "GlusterFS Quota conf | version: v%d.%d\n"
+int
+cli_cmd_quota_conf_skip_header (int fd)
+{
+ char buf[PATH_MAX] = {0,};
+
+ snprintf (buf, sizeof(buf)-1, QUOTA_CONF_HEADER, 1, 1);
+ return gf_skip_header_section (fd, strlen (buf));
+}
+
+int
+cli_cmd_quota_handle_list_all (const char **words, dict_t *options)
+{
+ int all_failed = 1;
+ int count = 0;
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ cli_local_t *local = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *xdata = NULL;
+ char *gfid_str = NULL;
+ char *volname = NULL;
+ char *volname_dup = NULL;
+ unsigned char buf[16] = {0};
+ int fd = -1;
+ char quota_conf_file[PATH_MAX] = {0};
+
+ xdata = dict_new ();
+ if (!xdata) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str (options, "volname", &volname);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get volume name");
+ goto out;
+ }
+
+ ret = cli_get_soft_limit (options, words, xdata);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to fetch default "
+ "soft-limit");
+ goto out;
+ }
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
+ volname_dup = gf_strdup (volname);
+ if (!volname_dup) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (xdata, "volume-uuid", volname_dup);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to set volume-uuid");
+ GF_FREE (volname_dup);
+ goto out;
+ }
+
+ //TODO: fix hardcoding; Need to perform an RPC call to glusterd
+ //to fetch working directory
+ sprintf (quota_conf_file, "/var/lib/glusterd/vols/%s/quota.conf",
+ volname);
+ fd = open (quota_conf_file, O_RDONLY);
+ if (fd == -1) {
+ //This may be because no limits have been set on the volume yet
+ gf_log ("cli", GF_LOG_TRACE, "Unable to open "
+ "quota.conf");
+ ret = 0;
+ goto out;
+ }
+
+ ret = cli_cmd_quota_conf_skip_header (fd);
+ if (ret) {
+ goto out;
+ }
+ CLI_LOCAL_INIT (local, words, frame, xdata);
+ proc = &cli_quotad_clnt.proctable[GF_AGGREGATOR_GETLIMIT];
+
+ print_quota_list_header ();
+ gfid_str = GF_CALLOC (1, gf_common_mt_char, 64);
+ if (!gfid_str) {
+ ret = -1;
+ goto out;
+ }
+ for (count = 0;; count++) {
+ ret = read (fd, (void*) buf, 16);
+ if (ret <= 0) {
+ //Finished reading all entries in the conf file
+ break;
+ }
+ if (ret < 16) {
+ //This should never happen. We must have a multiple of
+ //entry_sz bytes in our configuration file.
+ gf_log (THIS->name, GF_LOG_CRITICAL, "Quota "
+ "configuration store may be corrupt.");
+ goto out;
+ }
+ uuid_utoa_r (buf, gfid_str);
+ ret = dict_set_str (xdata, "gfid", gfid_str);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to set gfid");
+ goto out;
+ }
+
+ ret = proc->fn (frame, THIS, xdata);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get quota "
+ "limits for %s", uuid_utoa ((unsigned char*)buf));
+ }
+
+ dict_del (xdata, "gfid");
+ all_failed = all_failed && ret;
+ }
+
+ if (count > 0) {
+ ret = all_failed? 0: -1;
+ } else {
+ ret = 0;
+ }
+out:
+ if (count == 0) {
+ cli_out ("quota: No quota configured on volume %s", volname);
+ }
+ if (fd != -1) {
+ close (fd);
+ }
+
+ GF_FREE (gfid_str);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Couldn't fetch quota limits "
+ "for even one of the directories configured");
+ }
+ CLI_STACK_DESTROY (frame);
+ return ret;
+}
+
int
cli_cmd_quota_cbk (struct cli_state *state, struct cli_cmd_word *word,
const char **words, int wordcount)
@@ -1020,47 +1299,78 @@ cli_cmd_quota_cbk (struct cli_state *state, struct cli_cmd_word *word,
dict_t *options = NULL;
gf_answer_t answer = GF_ANSWER_NO;
cli_local_t *local = NULL;
+ int sent = 0;
+ char *volname = NULL;
const char *question = "Disabling quota will delete all the quota "
"configuration. Do you want to continue?";
- proc = &cli_rpc_prog->proctable[GLUSTER_CLI_QUOTA];
- if (proc == NULL) {
- ret = -1;
+ //parse **words into options dictionary
+ ret = cli_cmd_quota_parse (words, wordcount, &options);
+ if (ret < 0) {
+ cli_usage_out (word->pattern);
+ parse_err = 1;
goto out;
}
- frame = create_frame (THIS, THIS->ctx->pool);
- if (!frame) {
- ret = -1;
+ ret = dict_get_int32 (options, "type", &type);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get opcode");
goto out;
}
- ret = cli_cmd_quota_parse (words, wordcount, &options);
-
- if (ret < 0) {
- cli_usage_out (word->pattern);
- parse_err = 1;
- goto out;
- } else if (dict_get_int32 (options, "type", &type) == 0 &&
- type == GF_QUOTA_OPTION_TYPE_DISABLE) {
+ //handle quota-disable and quota-list-all differently from the others
+ switch (type) {
+ case GF_QUOTA_OPTION_TYPE_DISABLE:
answer = cli_cmd_get_confirmation (state, question);
if (answer == GF_ANSWER_NO)
goto out;
+ break;
+ case GF_QUOTA_OPTION_TYPE_LIST:
+ if (wordcount != 4)
+ break;
+ ret = cli_cmd_quota_handle_list_all (words, options);
+ goto out;
+ default:
+ break;
+ }
+
+ ret = dict_get_str (options, "volname", &volname);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get volume name");
+ goto out;
+ }
+
+ //create the auxiliary mount needed for quota commands that operate on a path
+ ret = cli_stage_quota_op (volname, type);
+ if (ret)
+ goto out;
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
}
CLI_LOCAL_INIT (local, words, frame, options);
+ proc = &cli_rpc_prog->proctable[GLUSTER_CLI_QUOTA];
+ if (proc == NULL) {
+ ret = -1;
+ goto out;
+ }
if (proc->fn)
ret = proc->fn (frame, THIS, options);
out:
- if (ret && parse_err == 0)
- cli_out ("Quota command failed");
+ if (ret) {
+ cli_cmd_sent_status_get (&sent);
+ if (sent == 0 && parse_err == 0)
+ cli_out ("Quota command failed. Please check the cli "
+ "logs for more details");
+ }
CLI_STACK_DESTROY (frame);
-
return ret;
-
}
int
@@ -1906,7 +2216,9 @@ struct cli_cmd volume_cmds[] = {
cli_cmd_volume_profile_cbk,
"volume profile operations"},
- { "volume quota <VOLNAME> <enable|disable|limit-usage|list|remove> [path] [value]",
+ { "volume quota <VOLNAME> {enable|disable|list [<path> ...]|remove <path>| default-soft-limit <percent>} |\n"
+ "volume quota <VOLNAME> {limit-usage <path> <size> [<percent>]} |\n"
+ "volume quota <VOLNAME> {alert-time|soft-timeout|hard-timeout} {<time>}",
cli_cmd_quota_cbk,
"quota translator specific operations"},
@@ -1915,7 +2227,7 @@ struct cli_cmd volume_cmds[] = {
cli_cmd_volume_top_cbk,
"volume top operations"},
- { "volume status [all | <VOLNAME> [nfs|shd|<BRICK>]]"
+ { "volume status [all | <VOLNAME> [nfs|shd|<BRICK>|quotad]]"
" [detail|clients|mem|inode|fd|callpool|tasks]",
cli_cmd_volume_status_cbk,
"display status of all or specified volume(s)/brick"},
@@ -1924,7 +2236,7 @@ struct cli_cmd volume_cmds[] = {
cli_cmd_volume_heal_cbk,
"self-heal commands on volume specified by <VOLNAME>"},
- {"volume statedump <VOLNAME> [nfs] [all|mem|iobuf|callpool|priv|fd|"
+ {"volume statedump <VOLNAME> [nfs|quotad] [all|mem|iobuf|callpool|priv|fd|"
"inode|history]...",
cli_cmd_volume_statedump_cbk,
"perform statedump on bricks"},
diff --git a/cli/src/cli-cmd.c b/cli/src/cli-cmd.c
index 1045f34f0f5..63b939282fa 100644
--- a/cli/src/cli-cmd.c
+++ b/cli/src/cli-cmd.c
@@ -352,7 +352,7 @@ cli_cmd_broadcast_connected ()
}
int
-cli_cmd_submit (void *req, call_frame_t *frame,
+cli_cmd_submit (struct rpc_clnt* rpc, void *req, call_frame_t *frame,
rpc_clnt_prog_t *prog,
int procnum, struct iobref *iobref,
xlator_t *this, fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
@@ -368,7 +368,7 @@ cli_cmd_submit (void *req, call_frame_t *frame,
cli_cmd_lock ();
cmd_sent = 0;
- ret = cli_submit_request (req, frame, prog,
+ ret = cli_submit_request (rpc, req, frame, prog,
procnum, NULL, this, cbkfn, xdrproc);
if (!ret) {
diff --git a/cli/src/cli-cmd.h b/cli/src/cli-cmd.h
index 06a1ed32aac..52396bbf755 100644
--- a/cli/src/cli-cmd.h
+++ b/cli/src/cli-cmd.h
@@ -110,7 +110,7 @@ int cli_cmd_lock ();
int cli_cmd_unlock ();
int
-cli_cmd_submit (void *req, call_frame_t *frame,
+cli_cmd_submit (struct rpc_clnt *rpc, void *req, call_frame_t *frame,
rpc_clnt_prog_t *prog,
int procnum, struct iobref *iobref,
xlator_t *this, fop_cbk_fn_t cbkfn, xdrproc_t xdrproc);
diff --git a/cli/src/cli-quotad-client.c b/cli/src/cli-quotad-client.c
new file mode 100644
index 00000000000..f0efc8640bd
--- /dev/null
+++ b/cli/src/cli-quotad-client.c
@@ -0,0 +1,154 @@
+/*
+ Copyright (c) 2010-2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+
+#include "cli-quotad-client.h"
+
+extern struct rpc_clnt global_quotad_rpc;
+extern struct rpc_clnt_program cli_quotad_clnt;
+
+int
+cli_quotad_submit_request (void *req, call_frame_t *frame,
+ rpc_clnt_prog_t *prog,
+ int procnum, struct iobref *iobref,
+ xlator_t *this, fop_cbk_fn_t cbkfn,
+ xdrproc_t xdrproc)
+{
+ int ret = -1;
+ int count = 0;
+ struct iovec iov = {0, };
+ struct iobuf *iobuf = NULL;
+ char new_iobref = 0;
+ ssize_t xdr_size = 0;
+
+ GF_ASSERT (this);
+
+ if (req) {
+ xdr_size = xdr_sizeof (xdrproc, req);
+ iobuf = iobuf_get2 (this->ctx->iobuf_pool, xdr_size);
+ if (!iobuf) {
+ goto out;
+ };
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ goto out;
+ }
+
+ new_iobref = 1;
+ }
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_size (iobuf);
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic (iov, req, xdrproc);
+ if (ret == -1) {
+ goto out;
+ }
+ iov.iov_len = ret;
+ count = 1;
+ }
+
+ /* Send the msg */
+ ret = rpc_clnt_submit (&global_quotad_rpc, prog, procnum, cbkfn,
+ &iov, count,
+ NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
+ ret = 0;
+
+out:
+ if (new_iobref)
+ iobref_unref (iobref);
+ if (iobuf)
+ iobuf_unref (iobuf);
+
+ return ret;
+}
+
+int
+cli_quotad_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ xlator_t *this = NULL;
+ int ret = 0;
+
+ this = mydata;
+
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ {
+ gf_log (this->name, GF_LOG_TRACE, "got RPC_CLNT_CONNECT");
+ break;
+ }
+
+ case RPC_CLNT_DISCONNECT:
+ {
+ gf_log (this->name, GF_LOG_TRACE, "got RPC_CLNT_DISCONNECT");
+ break;
+ }
+
+ default:
+ gf_log (this->name, GF_LOG_TRACE,
+ "got some other RPC event %d", event);
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+struct rpc_clnt *
+cli_quotad_clnt_init (xlator_t *this, dict_t *options)
+{
+ struct rpc_clnt *rpc = NULL;
+ int ret = -1;
+
+
+ ret = dict_set_str (options, "transport.address-family", "unix");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (options, "transport-type", "socket");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (options, "transport.socket.connect-path",
+ "/tmp/quotad.socket");
+ if (ret)
+ goto out;
+
+ rpc = rpc_clnt_new (options, this->ctx, this->name, 16);
+ if (!rpc)
+ goto out;
+
+ ret = rpc_clnt_register_notify (rpc, cli_quotad_notify, this);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "failed to register notify");
+ goto out;
+ }
+
+ rpc_clnt_start (rpc);
+out:
+ if (ret) {
+ if (rpc)
+ rpc_clnt_unref (rpc);
+ rpc = NULL;
+ }
+
+ return rpc;
+}
+
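Note on the transport options set above: "transport-type" socket plus "transport.address-family" unix and the connect-path /tmp/quotad.socket mean the CLI reaches quotad over a local unix-domain stream socket rather than TCP. Stripped of the rpc_clnt machinery, the reachability part amounts to the following sketch (plain AF_UNIX connect, no RPC framing):

/* Sketch: probe quotad's listener socket directly. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int
main (void)
{
        struct sockaddr_un addr = {0,};
        int                sock = -1;

        sock = socket (AF_UNIX, SOCK_STREAM, 0);
        if (sock < 0)
                return 1;

        addr.sun_family = AF_UNIX;
        strncpy (addr.sun_path, "/tmp/quotad.socket",
                 sizeof (addr.sun_path) - 1);

        if (connect (sock, (struct sockaddr *) &addr, sizeof (addr)) == 0)
                printf ("quotad is reachable\n");
        else
                perror ("connect");

        close (sock);
        return 0;
}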
diff --git a/cli/src/cli-quotad-client.h b/cli/src/cli-quotad-client.h
new file mode 100644
index 00000000000..aa0b42af38d
--- /dev/null
+++ b/cli/src/cli-quotad-client.h
@@ -0,0 +1,33 @@
+/*
+ Copyright (c) 2010-2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#include "cli.h"
+#include "compat-errno.h"
+#include "compat.h"
+#include "cli-cmd.h"
+#include "cli1-xdr.h"
+#include "xdr-generic.h"
+#include "protocol-common.h"
+#include "cli-mem-types.h"
+
+
+int
+cli_quotad_submit_request (void *req, call_frame_t *frame,
+ rpc_clnt_prog_t *prog,
+ int procnum, struct iobref *iobref,
+ xlator_t *this, fop_cbk_fn_t cbkfn,
+ xdrproc_t xdrproc);
+
+struct rpc_clnt *
+cli_quotad_clnt_init (xlator_t *this, dict_t *options);
+
+int
+cli_quotad_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data);
+
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 208f7a281f0..07c081affcc 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -37,9 +37,13 @@
#include "syscall.h"
#include "glusterfs3.h"
#include "portmap-xdr.h"
+#include "byte-order.h"
+#include "cli-quotad-client.h"
#include "run.h"
+extern struct rpc_clnt *global_quotad_rpc;
+extern rpc_clnt_prog_t cli_quotad_clnt;
extern rpc_clnt_prog_t *cli_rpc_prog;
extern int cli_op_ret;
extern int connected;
@@ -2281,137 +2285,346 @@ out:
return ret;
}
-int32_t
-gf_cli_print_limit_list (char *volname, char *limit_list,
- char *op_errstr)
+static int
+print_quota_list_output (char *mountdir, char *default_sl, char *path)
{
- int64_t size = 0;
- int64_t limit_value = 0;
- int32_t i, j;
- int32_t len = 0, ret = -1;
- char *size_str = NULL;
- char path [PATH_MAX] = {0, };
- char ret_str [1024] = {0, };
- char value [1024] = {0, };
- char mountdir [] = "/tmp/mntXXXXXX";
- char abspath [PATH_MAX] = {0, };
- char *colon_ptr = NULL;
- runner_t runner = {0,};
+ uint64_t used_space = 0;
+ uint64_t avail = 0;
+ char *used_str = NULL;
+ char *avail_str = NULL;
+ int ret = -1;
+ char *sl_final = NULL;
+ char percent_str[20] = {0,};
+ char *hl_str = NULL;
+
+ struct quota_limit {
+ int64_t hl;
+ int64_t sl;
+ } __attribute__ ((__packed__)) existing_limits;
+
+ ret = sys_lgetxattr (mountdir, "trusted.glusterfs.quota.limit-set",
+ (void *)&existing_limits,
+ sizeof (existing_limits));
+ if (ret < 0) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get the xattr "
+ "trusted.glusterfs.quota.limit-set on %s. Reason : %s",
+ mountdir, strerror (errno));
+ goto out;
+ }
- GF_VALIDATE_OR_GOTO ("cli", volname, out);
- GF_VALIDATE_OR_GOTO ("cli", limit_list, out);
+ existing_limits.hl = ntoh64 (existing_limits.hl);
+ existing_limits.sl = ntoh64 (existing_limits.sl);
- if (!connected)
+ hl_str = gf_uint64_2human_readable (existing_limits.hl);
+
+ if (existing_limits.sl < 0) {
+ sl_final = default_sl;
+ } else {
+ snprintf (percent_str, sizeof (percent_str), "%"PRIu64"%%",
+ existing_limits.sl);
+ sl_final = percent_str;
+ }
+
+ ret = sys_lgetxattr (mountdir, "trusted.glusterfs.quota.size",
+ &used_space, sizeof (used_space));
+
+ if (ret < 0) {
+ cli_out ("%-40s %7s %9s %11s %7s", path, hl_str, sl_final,
+ "N/A", "N/A");
+ } else {
+ used_space = ntoh64 (used_space);
+
+ used_str = gf_uint64_2human_readable (used_space);
+
+ if (existing_limits.hl > used_space)
+ avail = existing_limits.hl - used_space;
+ else
+ avail = 0;
+
+ avail_str = gf_uint64_2human_readable (avail);
+ if (used_str == NULL)
+ cli_out ("%-40s %7s %9s %11"PRIu64
+ "%9"PRIu64, path, hl_str,
+ sl_final, used_space, avail);
+ else
+ cli_out ("%-40s %7s %9s %11s %7s", path, hl_str,
+ sl_final, used_str, avail_str);
+ }
+
+out:
+ GF_FREE (used_str);
+ GF_FREE (avail_str);
+ GF_FREE (hl_str);
+ return ret;
+}
+
+int
+gf_cli_print_limit_list_from_dict (char *volname, dict_t *dict,
+ char *default_sl, int count, char *op_errstr)
+{
+ int ret = -1;
+ int i = 0;
+ char key[1024] = {0,};
+ char mountdir[PATH_MAX] = {0,};
+ char *path = NULL;
+
+ if (!dict|| count <= 0)
goto out;
- len = strlen (limit_list);
- if (len == 0) {
- cli_err ("%s", op_errstr?op_errstr:"quota limit not set ");
+ /*To-Do:
+ * Proper error reporting to handle the case where none of the given
+ * path arguments are present or have their limits set.
+ */
+
+ cli_out (" Path Hard-limit "
+ "Soft-limit Used Available");
+ cli_out ("-----------------------------------------------------"
+ "---------------------------");
+
+ while (count--) {
+ snprintf (key, sizeof (key), "path%d", i++);
+
+ ret = dict_get_str (dict, key, &path);
+ if (ret < 0) {
+ gf_log ("cli", GF_LOG_DEBUG, "Path not present in limit"
+ " list");
+ continue;
+ }
+
+ ret = gf_canonicalize_path (path);
+ if (ret)
+ goto out;
+ snprintf (mountdir, sizeof (mountdir), "/tmp/%s%s", volname,
+ path);
+
+ ret = print_quota_list_output (mountdir, default_sl, path);
+
+ }
+out:
+ return ret;
+}
+
+int
+print_quota_list_from_quotad (call_frame_t *frame, dict_t *rsp_dict)
+{
+ int64_t used_space = 0;
+ int64_t avail = 0;
+ int64_t *limit = NULL;
+ char *used_str = NULL;
+ char *avail_str = NULL;
+ char percent_str[20]= {0};
+ char *hl_str = NULL;
+ char *sl_final = NULL;
+ char *path = NULL;
+ char *default_sl = NULL;
+ int ret = -1;
+ cli_local_t *local = NULL;
+ dict_t *gd_rsp_dict = NULL;
+
+ local = frame->local;
+ gd_rsp_dict = local->dict;
+
+ struct quota_limit {
+ int64_t hl;
+ int64_t sl;
+ } __attribute__ ((__packed__)) *existing_limits = NULL;
+
+ ret = dict_get_str (rsp_dict, GET_ANCESTRY_PATH_KEY, &path);
+ if (ret) {
+ gf_log ("cli", GF_LOG_WARNING, "path key is not present "
+ "in dict");
goto out;
}
- if (mkdtemp (mountdir) == NULL) {
- gf_log ("cli", GF_LOG_WARNING, "failed to create a temporary "
- "mount directory");
- ret = -1;
+ ret = dict_get_bin (rsp_dict, QUOTA_LIMIT_KEY, (void**)&limit);
+ if (ret) {
+ gf_log ("cli", GF_LOG_WARNING,
+ "limit key not present in dict");
goto out;
}
- /* Mount a temporary client to fetch the disk usage
- * of the directory on which the limit is set.
- */
- ret = runcmd (SBIN_DIR"/glusterfs", "-s",
- "localhost", "--volfile-id", volname, "-l",
- DEFAULT_LOG_FILE_DIRECTORY"/quota-list.log",
- mountdir, NULL);
+ ret = dict_get_str (gd_rsp_dict, "default-soft-limit", &default_sl);
if (ret) {
- gf_log ("cli", GF_LOG_WARNING, "failed to mount glusterfs client");
- ret = -1;
- goto rm_dir;
+ gf_log (frame->this->name, GF_LOG_ERROR, "failed to "
+ "get default soft limit");
+ goto out;
}
+ existing_limits = (struct quota_limit *)limit;
+ existing_limits->hl = ntoh64 (existing_limits->hl);
+ existing_limits->sl = ntoh64 (existing_limits->sl);
- len = strlen (limit_list);
- if (len == 0) {
- cli_err ("quota limit not set ");
- goto unmount;
+ hl_str = gf_uint64_2human_readable (existing_limits->hl);
+
+ if (existing_limits->sl < 0) {
+ sl_final = default_sl;
+ } else {
+ snprintf (percent_str, sizeof (percent_str), "%"PRIu64"%%",
+ existing_limits->sl);
+ sl_final = percent_str;
}
- i = 0;
+ ret = dict_get_bin (rsp_dict, QUOTA_SIZE_KEY, (void**)&limit);
+ if (ret < 0) {
+ gf_log ("cli", GF_LOG_WARNING,
+ "size key not present in dict");
+ cli_out ("%-40s %7s %9s %11s %7s", path, hl_str, sl_final,
+ "N/A", "N/A");
+ } else {
+ used_space = *limit;
+ used_space = ntoh64 (used_space);
+ used_str = gf_uint64_2human_readable (used_space);
- cli_out ("\tpath\t\t limit_set\t size");
- cli_out ("-----------------------------------------------------------"
- "-----------------------");
- while (i < len) {
- j = 0;
+ if (existing_limits->hl > used_space)
+ avail = existing_limits->hl - used_space;
+ else
+ avail = 0;
- while (limit_list [i] != ',' && limit_list [i] != '\0') {
- path [j++] = limit_list[i++];
- }
- path [j] = '\0';
- //here path[] contains both path and limit value
+ avail_str = gf_uint64_2human_readable (avail);
+ if (used_str == NULL)
+ cli_out ("%-40s %7s %9s %11"PRIu64
+ "%9"PRIu64, path, hl_str,
+ sl_final, used_space, avail);
+ else
+ cli_out ("%-40s %7s %9s %11s %7s", path, hl_str,
+ sl_final, used_str, avail_str);
+ }
+
+ ret = 0;
+out:
+ GF_FREE (used_str);
+ GF_FREE (avail_str);
+ GF_FREE (hl_str);
+ return ret;
+}
+
+int
+cli_quotad_getlimit_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ //TODO: we need to gather the path, hard-limit, soft-limit and used space
+ gf_cli_rsp rsp = {0,};
+ int ret = -1;
+ dict_t *dict = NULL;
+ call_frame_t *frame = NULL;
- colon_ptr = strrchr (path, ':');
- *colon_ptr = '\0';
- strcpy (value, ++colon_ptr);
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
- snprintf (abspath, sizeof (abspath), "%s/%s", mountdir, path);
+ frame = myframe;
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
+ goto out;
+ }
+
+ if (rsp.op_ret && strcmp (rsp.op_errstr, "") == 0) {
+ cli_err ("quota command : failed");
+ goto out;
+
+ } else if (strcmp (rsp.op_errstr, ""))
+ cli_err ("quota command failed : %s", rsp.op_errstr);
+
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new ();
- ret = sys_lgetxattr (abspath, "trusted.limit.list", (void *) ret_str, 4096);
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &dict);
if (ret < 0) {
- cli_out ("%-20s %10s", path, value);
- } else {
- sscanf (ret_str, "%"PRId64",%"PRId64, &size,
- &limit_value);
- size_str = gf_uint64_2human_readable ((uint64_t) size);
- if (size_str == NULL) {
- cli_out ("%-20s %10s %20"PRId64, path,
- value, size);
- } else {
- cli_out ("%-20s %10s %20s", path,
- value, size_str);
- GF_FREE (size_str);
- }
+ gf_log ("cli", GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
}
- i++;
+ print_quota_list_from_quotad (frame, dict);
}
-unmount:
+out:
+ cli_cmd_broadcast_response (ret);
+ if (dict)
+ dict_unref (dict);
- runinit (&runner);
- runner_add_args (&runner, "umount",
-#if GF_LINUX_HOST_OS
- "-l",
-#endif
- mountdir, NULL);
- ret = runner_run_reuse (&runner);
- if (ret)
- runner_log (&runner, "cli", GF_LOG_WARNING, "error executing");
- runner_end (&runner);
+ free (rsp.dict.dict_val);
+ return ret;
+}
+
+int
+cli_quotad_getlimit (call_frame_t *frame, xlator_t *this, void *data)
+{
+ gf_cli_req req = {{0,}};
+ int ret = 0;
+ dict_t *dict = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ dict = data;
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to serialize the data");
+
+ goto out;
+ }
+
+ ret = cli_cmd_submit (global_quotad_rpc, &req, frame, &cli_quotad_clnt,
+ GF_AGGREGATOR_GETLIMIT, NULL,
+ this, cli_quotad_getlimit_cbk,
+ (xdrproc_t) xdr_gf_cli_req);
-rm_dir:
- rmdir (mountdir);
out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
+
+
+}
+
+void
+gf_cli_quota_list (char *volname, dict_t *dict, int count, char *op_errstr,
+ char *default_sl)
+{
+ GF_VALIDATE_OR_GOTO ("cli", volname, out);
+
+ if (!connected)
+ goto out;
+
+ if (count > 0)
+ gf_cli_print_limit_list_from_dict (volname, dict, default_sl,
+ count, op_errstr);
+out:
+ return;
}
int
gf_cli_quota_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
- gf_cli_rsp rsp = {0,};
- int ret = -1;
- dict_t *dict = NULL;
- char *volname = NULL;
- char *limit_list = NULL;
- int32_t type = 0;
- char msg[1024] = {0,};
- call_frame_t *frame = NULL;
-
+ gf_cli_rsp rsp = {0,};
+ int ret = -1;
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ int32_t type = 0;
+ call_frame_t *frame = NULL;
+ char *default_sl = NULL;
+ char *limit_list = NULL;
+ cli_local_t *local = NULL;
+ dict_t *aggr = NULL;
+ char *default_sl_dup = NULL;
+ int32_t entry_count = 0;
if (-1 == req->rpc_status) {
goto out;
}
frame = myframe;
+ local = frame->local;
+ aggr = local->dict;
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
@@ -2420,15 +2633,14 @@ gf_cli_quota_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
- if (rsp.op_ret &&
- strcmp (rsp.op_errstr, "") == 0) {
- snprintf (msg, sizeof (msg), "command unsuccessful %s",
- rsp.op_errstr);
+ if (rsp.op_ret && strcmp (rsp.op_errstr, "") == 0) {
+ cli_err ("quota command : failed");
if (global_state->mode & GLUSTER_MODE_XML)
goto xml_output;
goto out;
- }
+ } else if (strcmp (rsp.op_errstr, ""))
+ cli_err ("quota command failed : %s", rsp.op_errstr);
if (rsp.dict.dict_len) {
/* Unserialize the dictionary */
@@ -2438,28 +2650,51 @@ gf_cli_quota_cbk (struct rpc_req *req, struct iovec *iov,
rsp.dict.dict_len,
&dict);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
+ gf_log ("cli", GF_LOG_ERROR,
"failed to "
"unserialize req-buffer to dictionary");
goto out;
}
}
+ gf_log ("cli", GF_LOG_DEBUG, "Received resp to quota command");
+
ret = dict_get_str (dict, "volname", &volname);
if (ret)
- gf_log (frame->this->name, GF_LOG_TRACE,
+ gf_log (frame->this->name, GF_LOG_ERROR,
"failed to get volname");
- ret = dict_get_str (dict, "limit_list", &limit_list);
+ ret = dict_get_str (dict, "default-soft-limit", &default_sl);
if (ret)
- gf_log (frame->this->name, GF_LOG_TRACE,
- "failed to get limit_list");
+ gf_log (frame->this->name, GF_LOG_TRACE, "failed to get "
+ "default soft limit");
+
+ // default-soft-limit is part of rsp_dict only iff we sent
+ // GLUSTER_CLI_QUOTA with type being GF_QUOTA_OPTION_TYPE_LIST
+ if (default_sl) {
+ default_sl_dup = gf_strdup (default_sl);
+ if (!default_sl_dup) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr (aggr, "default-soft-limit",
+ default_sl_dup);
+ if (ret) {
+ gf_log (frame->this->name, GF_LOG_TRACE,
+ "failed to set default soft limit");
+ GF_FREE (default_sl_dup);
+ }
+ }
ret = dict_get_int32 (dict, "type", &type);
if (ret)
gf_log (frame->this->name, GF_LOG_TRACE,
"failed to get type");
+ ret = dict_get_int32 (dict, "count", &entry_count);
+ if (ret)
+ gf_log (frame->this->name, GF_LOG_TRACE, "failed to get count");
+
if (type == GF_QUOTA_OPTION_TYPE_LIST) {
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_vol_quota_limit_list
@@ -2469,31 +2704,15 @@ gf_cli_quota_cbk (struct rpc_req *req, struct iovec *iov,
gf_log ("cli", GF_LOG_ERROR,
"Error outputting to xml");
goto out;
-
}
- if (limit_list) {
- gf_cli_print_limit_list (volname,
- limit_list,
- rsp.op_errstr);
- } else {
- gf_log ("cli", GF_LOG_INFO, "Received resp to quota "
- "command ");
- if (rsp.op_errstr)
- snprintf (msg, sizeof (msg), "%s",
- rsp.op_errstr);
- }
- } else {
- gf_log ("cli", GF_LOG_INFO, "Received resp to quota command ");
- if (rsp.op_errstr)
- snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
- else
- snprintf (msg, sizeof (msg), "successful");
+ gf_cli_quota_list (volname, dict, entry_count, rsp.op_errstr,
+ default_sl);
}
xml_output:
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_str ("volQuota", msg, rsp.op_ret,
+ ret = cli_xml_output_str ("volQuota", NULL, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
@@ -2501,12 +2720,8 @@ xml_output:
goto out;
}
- if (strlen (msg) > 0) {
- if (rsp.op_ret)
- cli_err ("%s", msg);
- else
- cli_out ("%s", msg);
- }
+ if (!rsp.op_ret && type != GF_QUOTA_OPTION_TYPE_LIST)
+ cli_out ("volume quota : success");
ret = rsp.op_ret;
out:
@@ -2700,7 +2915,7 @@ gf_cli_list_friends (call_frame_t *frame, xlator_t *this,
flags = (long)data;
req.flags = flags;
frame->local = (void*)flags;
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_LIST_FRIENDS, NULL,
this, gf_cli_list_friends_cbk,
(xdrproc_t) xdr_gf1_cli_peer_list_req);
@@ -2814,7 +3029,7 @@ gf_cli_get_volume (call_frame_t *frame, xlator_t *this,
ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
&req.dict.dict_len);
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_GET_VOLUME, NULL,
this, gf_cli_get_volume_cbk,
(xdrproc_t) xdr_gf_cli_req);
@@ -3033,7 +3248,7 @@ gf_cli_rename_volume (call_frame_t *frame, xlator_t *this,
}
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_RENAME_VOLUME, NULL,
this, gf_cli_rename_volume_cbk,
(xdrproc_t) xdr_gf_cli_req);
@@ -3385,7 +3600,7 @@ gf_cli_getspec (call_frame_t *frame, xlator_t *this,
goto out;
}
- ret = cli_cmd_submit (&req, frame, &cli_handshake_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, &cli_handshake_prog,
GF_HNDSK_GETSPEC, NULL,
this, gf_cli_getspec_cbk,
(xdrproc_t) xdr_gf_getspec_req);
@@ -3442,7 +3657,7 @@ gf_cli_pmap_b2p (call_frame_t *frame, xlator_t *this, void *data)
if (ret)
goto out;
- ret = cli_cmd_submit (&req, frame, &cli_pmap_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, &cli_pmap_prog,
GF_PMAP_PORTBYBRICK, NULL,
this, gf_cli_pmap_b2p_cbk,
(xdrproc_t) xdr_pmap_port_by_brick_req);
@@ -3557,7 +3772,7 @@ gf_cli_fsm_log (call_frame_t *frame, xlator_t *this, void *data)
if (!frame || !this || !data)
goto out;
req.name = data;
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_FSM_LOG, NULL,
this, gf_cli_fsm_log_cbk,
(xdrproc_t) xdr_gf1_cli_fsm_log_req);
@@ -5125,7 +5340,7 @@ gf_cli_getwd (call_frame_t *frame, xlator_t *this, void *data)
if (!frame || !this)
goto out;
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_GETWD, NULL,
this, gf_cli_getwd_cbk,
(xdrproc_t) xdr_gf1_cli_getwd_req);
@@ -6329,7 +6544,8 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
- if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD))
+ if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD) ||
+ (cmd & GF_CLI_STATUS_QUOTAD))
notbrick = _gf_true;
if (global_state->mode & GLUSTER_MODE_XML) {
@@ -6444,7 +6660,8 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
*/
memset (status.brick, 0, PATH_MAX + 255);
if (!strcmp (hostname, "NFS Server") ||
- !strcmp (hostname, "Self-heal Daemon"))
+ !strcmp (hostname, "Self-heal Daemon") ||
+ !strcmp (hostname, "Quota Daemon"))
snprintf (status.brick, PATH_MAX + 255, "%s on %s",
hostname, path);
else
@@ -6694,7 +6911,7 @@ gf_cli_mount (call_frame_t *frame, xlator_t *this, void *data)
goto out;
}
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_MOUNT, NULL,
this, gf_cli_mount_cbk,
(xdrproc_t)xdr_gf1_cli_mount_req);
@@ -6757,7 +6974,7 @@ gf_cli_umount (call_frame_t *frame, xlator_t *this, void *data)
goto out;
}
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_UMOUNT, NULL,
this, gf_cli_umount_cbk,
(xdrproc_t)xdr_gf1_cli_umount_req);
@@ -7326,7 +7543,7 @@ gf_cli_list_volume (call_frame_t *frame, xlator_t *this, void *data)
if (!frame || !this)
goto out;
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_LIST_VOLUME, NULL,
this, gf_cli_list_volume_cbk,
(xdrproc_t)xdr_gf_cli_req);
@@ -7499,7 +7716,7 @@ cli_to_glusterd (gf_cli_req *req, call_frame_t *frame,
goto out;
}
- ret = cli_cmd_submit (req, frame, prog, procnum, iobref, this,
+ ret = cli_cmd_submit (NULL, req, frame, prog, procnum, iobref, this,
cbkfn, (xdrproc_t) xdrproc);
out:
@@ -7556,3 +7773,17 @@ struct rpc_clnt_program cli_prog = {
.numproc = GLUSTER_CLI_MAXVALUE,
.proctable = gluster_cli_actors,
};
+
+struct rpc_clnt_procedure cli_quotad_procs[GF_AGGREGATOR_MAXVALUE] = {
+ [GF_AGGREGATOR_NULL] = {"NULL", NULL},
+ [GF_AGGREGATOR_LOOKUP] = {"LOOKUP", NULL},
+ [GF_AGGREGATOR_GETLIMIT] = {"GETLIMIT", cli_quotad_getlimit},
+};
+
+struct rpc_clnt_program cli_quotad_clnt = {
+ .progname = "CLI Quotad client",
+ .prognum = GLUSTER_AGGREGATOR_PROGRAM,
+ .progver = GLUSTER_AGGREGATOR_VERSION,
+ .numproc = GF_AGGREGATOR_MAXVALUE,
+ .proctable = cli_quotad_procs,
+};
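Note on the limit decoding in print_quota_list_output() and print_quota_list_from_quotad() above: trusted.glusterfs.quota.limit-set is treated as a packed pair of 64-bit values in network byte order, hard limit in bytes followed by soft limit (a negative soft limit meaning "use the volume's default-soft-limit"). A self-contained sketch of that decoding, using glibc's be64toh() in place of the tree's ntoh64() macro:

/* Sketch: decode a fabricated limit-set value the way the CLI does. */
#define _DEFAULT_SOURCE
#include <endian.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

struct quota_limit {
        int64_t hl;   /* hard limit, bytes, big-endian on the wire */
        int64_t sl;   /* soft limit; negative means "use default"  */
} __attribute__ ((__packed__));

int
main (void)
{
        unsigned char      xattr[16] = {0,};   /* as if from sys_lgetxattr() */
        struct quota_limit limits    = {0,};
        uint64_t           hl_be     = htobe64 (10ULL * 1024 * 1024 * 1024);
        uint64_t           sl_be     = htobe64 ((uint64_t) -1);  /* -1: unset */

        memcpy (xattr, &hl_be, 8);
        memcpy (xattr + 8, &sl_be, 8);

        memcpy (&limits, xattr, sizeof (limits));
        limits.hl = (int64_t) be64toh ((uint64_t) limits.hl);
        limits.sl = (int64_t) be64toh ((uint64_t) limits.sl);

        printf ("hard-limit=%"PRId64" bytes, soft-limit=%"PRId64"\n",
                limits.hl, limits.sl);   /* soft-limit -1 => default applies */
        return 0;
}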
diff --git a/cli/src/cli.c b/cli/src/cli.c
index 91b315ff169..67f1ad25793 100644
--- a/cli/src/cli.c
+++ b/cli/src/cli.c
@@ -45,6 +45,7 @@
#endif
#include "cli.h"
+#include "cli-quotad-client.h"
#include "cli-cmd.h"
#include "cli-mem-types.h"
@@ -82,6 +83,7 @@ const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
+struct rpc_clnt *global_quotad_rpc;
struct rpc_clnt *global_rpc;
rpc_clnt_prog_t *cli_rpc_prog;
@@ -184,7 +186,7 @@ logging_init (glusterfs_ctx_t *ctx, struct cli_state *state)
}
int
-cli_submit_request (void *req, call_frame_t *frame,
+cli_submit_request (struct rpc_clnt *rpc, void *req, call_frame_t *frame,
rpc_clnt_prog_t *prog,
int procnum, struct iobref *iobref,
xlator_t *this, fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
@@ -229,8 +231,10 @@ cli_submit_request (void *req, call_frame_t *frame,
count = 1;
}
+ if (!rpc)
+ rpc = global_rpc;
/* Send the msg */
- ret = rpc_clnt_submit (global_rpc, prog, procnum, cbkfn,
+ ret = rpc_clnt_submit (rpc, prog, procnum, cbkfn,
&iov, count,
NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
ret = 0;
@@ -491,6 +495,42 @@ _cli_out (const char *fmt, ...)
}
struct rpc_clnt *
+cli_quotad_clnt_rpc_init (void)
+{
+ struct rpc_clnt *rpc = NULL;
+ dict_t *rpc_opts = NULL;
+ int ret = -1;
+
+ rpc_opts = dict_new ();
+ if (!rpc_opts) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_str (rpc_opts, "transport.address-family", "unix");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (rpc_opts, "transport-type", "socket");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (rpc_opts, "transport.socket.connect-path",
+ "/tmp/quotad.socket");
+ if (ret)
+ goto out;
+
+ rpc = cli_quotad_clnt_init (THIS, rpc_opts);
+ if (!rpc)
+ goto out;
+
+ global_quotad_rpc = rpc;
+out:
+ dict_unref (rpc_opts);
+ return rpc;
+}
+
+struct rpc_clnt *
cli_rpc_init (struct cli_state *state)
{
struct rpc_clnt *rpc = NULL;
@@ -634,6 +674,10 @@ main (int argc, char *argv[])
if (!global_rpc)
goto out;
+ global_quotad_rpc = cli_quotad_clnt_rpc_init ();
+ if (!global_quotad_rpc)
+ goto out;
+
ret = cli_cmds_register (&state);
if (ret)
goto out;
diff --git a/cli/src/cli.h b/cli/src/cli.h
index bc71ee2b4f0..b71140a810b 100644
--- a/cli/src/cli.h
+++ b/cli/src/cli.h
@@ -45,6 +45,13 @@ enum argp_option_keys {
#define GLUSTER_MODE_SCRIPT (1 << 0)
#define GLUSTER_MODE_ERR_FATAL (1 << 1)
#define GLUSTER_MODE_XML (1 << 2)
+
+
+#define GLUSTERFS_GET_AUX_MOUNT_PIDFILE(pidfile,volname) { \
+ snprintf (pidfile, PATH_MAX-1, \
+ DEFAULT_VAR_RUN_DIRECTORY"/%s.pid", volname); \
+ }
+
struct cli_state;
struct cli_cmd_word;
struct cli_cmd_tree;
@@ -212,7 +219,7 @@ int _cli_err (const char *fmt, ...);
} while (0)
int
-cli_submit_request (void *req, call_frame_t *frame,
+cli_submit_request (struct rpc_clnt *rpc, void *req, call_frame_t *frame,
rpc_clnt_prog_t *prog,
int procnum, struct iobref *iobref,
xlator_t *this, fop_cbk_fn_t cbkfn, xdrproc_t xdrproc);
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index f4445acf6dd..bbfc5059361 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -912,6 +912,9 @@ glusterfs_handle_node_status (rpcsvc_request_t *req)
ret = gf_asprintf (&node_name, "%s", "nfs-server");
else if ((cmd & GF_CLI_STATUS_SHD) != 0)
ret = gf_asprintf (&node_name, "%s", "glustershd");
+ else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
+ ret = gf_asprintf (&node_name, "%s", "quotad");
+
else {
ret = -1;
goto out;
@@ -934,6 +937,8 @@ glusterfs_handle_node_status (rpcsvc_request_t *req)
ret = gf_asprintf (&subvol_name, "%s", volname);
else if ((cmd & GF_CLI_STATUS_SHD) != 0)
ret = gf_asprintf (&subvol_name, "%s-replicate-0", volname);
+ else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
+ ret = gf_asprintf (&subvol_name, "%s", volname);
else {
ret = -1;
goto out;
diff --git a/libglusterfs/src/common-utils.c b/libglusterfs/src/common-utils.c
index 4af1b445433..cb51dd17537 100644
--- a/libglusterfs/src/common-utils.c
+++ b/libglusterfs/src/common-utils.c
@@ -3063,3 +3063,51 @@ gf_get_hard_limit (char *limit, char **hard_limit)
return 0;
}
+
+int
+gf_skip_header_section (int fd, int header_len)
+{
+ int ret = -1;
+
+ ret = lseek (fd, header_len, SEEK_SET);
+ if (ret == (off_t) -1) {
+ gf_log ("", GF_LOG_ERROR, "Failed to skip header "
+ "section");
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+gf_boolean_t
+gf_is_service_running (char *pidfile, int *pid)
+{
+ FILE *file = NULL;
+ gf_boolean_t running = _gf_false;
+ int ret = 0;
+ int fno = 0;
+
+ file = fopen (pidfile, "r+");
+ if (!file)
+ goto out;
+
+ fno = fileno (file);
+ ret = lockf (fno, F_TEST, 0);
+ if (ret == -1)
+ running = _gf_true;
+ if (!pid)
+ goto out;
+
+ ret = fscanf (file, "%d", pid);
+ if (ret <= 0) {
+ gf_log ("", GF_LOG_ERROR, "Unable to read pidfile: %s, %s",
+ pidfile, strerror (errno));
+ *pid = -1;
+ }
+
+out:
+ if (file)
+ fclose (file);
+ return running;
+}
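Note on gf_is_service_running() above: it does not signal or parse anything to decide liveness; it relies on the running daemon holding an advisory lock on its pidfile, so lockf(F_TEST) from another process fails while the daemon is alive. The convention can be demonstrated with the standalone sketch below (the pidfile path is illustrative); run it once with the argument "hold" to play the daemon, then again without arguments to probe:

/* Sketch of the pidfile-lock convention behind gf_is_service_running(). */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main (int argc, char *argv[])
{
        const char *pidfile = "/tmp/example-service.pid";
        int         fd      = -1;

        fd = open (pidfile, O_RDWR | O_CREAT, 0644);
        if (fd < 0) {
                fprintf (stderr, "open: %s\n", strerror (errno));
                return 1;
        }

        if (argc > 1 && strcmp (argv[1], "hold") == 0) {
                /* "Daemon" side: take the lock, record our pid, stay alive. */
                if (lockf (fd, F_LOCK, 0) == 0) {
                        dprintf (fd, "%d\n", getpid ());
                        pause ();
                }
        } else {
                /* Probe side: F_TEST fails if another process holds the lock. */
                if (lockf (fd, F_TEST, 0) == -1)
                        printf ("service is running\n");
                else
                        printf ("service is not running\n");
        }

        close (fd);
        return 0;
}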
diff --git a/libglusterfs/src/common-utils.h b/libglusterfs/src/common-utils.h
index acf2202d54e..b50d3ac380e 100644
--- a/libglusterfs/src/common-utils.h
+++ b/libglusterfs/src/common-utils.h
@@ -615,4 +615,8 @@ char **backtrace_symbols(void *const *, size_t);
int gf_get_soft_limit (char *limit, char **soft_limit);
int gf_get_hard_limit (char *limit, char **hard_limit);
+gf_boolean_t
+gf_is_service_running (char *pidfile, int *pid);
+int
+gf_skip_header_section (int fd, int header_len);
#endif /* _COMMON_UTILS_H */
diff --git a/rpc/xdr/src/cli1-xdr.h b/rpc/xdr/src/cli1-xdr.h
index d418fabf399..0bed637e283 100644
--- a/rpc/xdr/src/cli1-xdr.h
+++ b/rpc/xdr/src/cli1-xdr.h
@@ -92,6 +92,10 @@ enum gf_quota_type {
GF_QUOTA_OPTION_TYPE_REMOVE = 0 + 4,
GF_QUOTA_OPTION_TYPE_LIST = 0 + 5,
GF_QUOTA_OPTION_TYPE_VERSION = 0 + 6,
+ GF_QUOTA_OPTION_TYPE_ALERT_TIME = 0 + 7,
+ GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT = 0 + 8,
+ GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT = 0 + 9,
+ GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT = 0 + 10,
};
typedef enum gf_quota_type gf_quota_type;
@@ -166,6 +170,7 @@ enum gf_cli_status_type {
GF_CLI_STATUS_BRICK = 0x0400,
GF_CLI_STATUS_NFS = 0x0800,
GF_CLI_STATUS_SHD = 0x1000,
+ GF_CLI_STATUS_QUOTAD = 0x2000,
};
typedef enum gf_cli_status_type gf_cli_status_type;
diff --git a/rpc/xdr/src/cli1-xdr.x b/rpc/xdr/src/cli1-xdr.x
index cc7ca8e248f..07327c72729 100644
--- a/rpc/xdr/src/cli1-xdr.x
+++ b/rpc/xdr/src/cli1-xdr.x
@@ -51,7 +51,11 @@ enum gf_quota_type {
GF_QUOTA_OPTION_TYPE_LIMIT_USAGE,
GF_QUOTA_OPTION_TYPE_REMOVE,
GF_QUOTA_OPTION_TYPE_LIST,
- GF_QUOTA_OPTION_TYPE_VERSION
+ GF_QUOTA_OPTION_TYPE_VERSION,
+ GF_QUOTA_OPTION_TYPE_ALERT_TIME,
+ GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT,
+ GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT,
+ GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT
};
enum gf1_cli_friends_list {
@@ -107,19 +111,20 @@ enum gf1_cli_top_op {
bit-wise operations which reduces complexity */
enum gf_cli_status_type {
GF_CLI_STATUS_NONE = 0x0000,
- GF_CLI_STATUS_MEM = 0x0001, /*0000000000001*/
- GF_CLI_STATUS_CLIENTS = 0x0002, /*0000000000010*/
- GF_CLI_STATUS_INODE = 0x0004, /*0000000000100*/
- GF_CLI_STATUS_FD = 0x0008, /*0000000001000*/
- GF_CLI_STATUS_CALLPOOL = 0x0010, /*0000000010000*/
- GF_CLI_STATUS_DETAIL = 0x0020, /*0000000100000*/
+ GF_CLI_STATUS_MEM = 0x0001, /*00000000000001*/
+ GF_CLI_STATUS_CLIENTS = 0x0002, /*00000000000010*/
+ GF_CLI_STATUS_INODE = 0x0004, /*00000000000100*/
+ GF_CLI_STATUS_FD = 0x0008, /*00000000001000*/
+ GF_CLI_STATUS_CALLPOOL = 0x0010, /*00000000010000*/
+ GF_CLI_STATUS_DETAIL = 0x0020, /*00000000100000*/
GF_CLI_STATUS_TASKS = 0x0040, /*0000001000000*/
- GF_CLI_STATUS_MASK = 0x00FF, /*0000011111111 Used to get the op*/
- GF_CLI_STATUS_VOL = 0x0100, /*0000100000000*/
- GF_CLI_STATUS_ALL = 0x0200, /*0001000000000*/
- GF_CLI_STATUS_BRICK = 0x0400, /*0010000000000*/
- GF_CLI_STATUS_NFS = 0x0800, /*0100000000000*/
- GF_CLI_STATUS_SHD = 0x1000 /*1000000000000*/
+ GF_CLI_STATUS_MASK = 0x00FF, /*00000011111111 Used to get the op*/
+ GF_CLI_STATUS_VOL = 0x0100, /*00000100000000*/
+ GF_CLI_STATUS_ALL = 0x0200, /*00001000000000*/
+ GF_CLI_STATUS_BRICK = 0x0400, /*00010000000000*/
+ GF_CLI_STATUS_NFS = 0x0800, /*00100000000000*/
+ GF_CLI_STATUS_SHD = 0x1000, /*01000000000000*/
+ GF_CLI_STATUS_QUOTAD = 0x2000 /*10000000000000*/
};
struct gf_cli_req {
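Note on the flag layout above: as the comment in the .x file says, the values are chosen for bit-wise composition; the low byte (GF_CLI_STATUS_MASK) carries the requested operation while the higher bits select the target, and the new GF_CLI_STATUS_QUOTAD bit composes with them like any other target. A tiny sketch with stand-in names (mirroring the values, not the real enum) showing how the bits are combined and tested:

/* Sketch: composing and testing status flags the way the CLI/glusterd do. */
#include <stdio.h>

enum {
        STATUS_MEM    = 0x0001,
        STATUS_DETAIL = 0x0020,
        STATUS_MASK   = 0x00FF,   /* operation part */
        STATUS_VOL    = 0x0100,
        STATUS_QUOTAD = 0x2000,
};

int
main (void)
{
        int cmd = STATUS_VOL | STATUS_QUOTAD | STATUS_MEM;

        printf ("operation bits: 0x%04x\n", cmd & STATUS_MASK);   /* 0x0001 */
        printf ("targets quotad: %s\n",
                (cmd & STATUS_QUOTAD) ? "yes" : "no");            /* yes */
        return 0;
}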
diff --git a/tests/bugs/bug-848251.t b/tests/bugs/bug-848251.t
index dda393272c6..844162283d8 100644
--- a/tests/bugs/bug-848251.t
+++ b/tests/bugs/bug-848251.t
@@ -19,6 +19,7 @@ TEST $CLI volume quota $V0 enable;
TEST MOUNTDIR="/tmp/$RANDOM"
TEST mkdir $MOUNTDIR
TEST glusterfs -s $H0 --volfile-id=$V0 $MOUNTDIR
+sleep 10
function set_quota(){
mkdir "$MOUNTDIR/$name"
@@ -32,17 +33,17 @@ function quota_list(){
TEST name=":d1"
#file name containing ':' in the start
TEST set_quota
-EXPECT "0Bytes" quota_list
+EXPECT "80%" quota_list
TEST name=":d1/d:1"
#file name containing ':' in between
TEST set_quota
-EXPECT "0Bytes" quota_list
+EXPECT "80%" quota_list
TEST name=":d1/d:1/d1:"
#file name containing ':' in the end
TEST set_quota
-EXPECT "0Bytes" quota_list
+EXPECT "80%" quota_list
TEST umount $MOUNTDIR
TEST rm -rf $MOUNTDIR
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 740d04aa1e4..3aafa122b6a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -54,6 +54,8 @@
#include <lvm2app.h>
#endif
+extern glusterd_op_info_t opinfo;
+
int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event,
void *data, rpc_clnt_notify_t notify_fn)
@@ -307,10 +309,23 @@ _build_option_key (dict_t *d, char *k, data_t *v, void *tmp)
char reconfig_key[256] = {0, };
struct args_pack *pack = NULL;
int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
pack = tmp;
if (strcmp (k, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
return 0;
+
+ if (priv->op_version > GD_OP_VERSION_MIN) {
+ if ((strcmp (k, "features.limit-usage") == 0) ||
+ (strcmp (k, "features.soft-limit") == 0))
+ return 0;
+ }
snprintf (reconfig_key, 256, "volume%d.option.%s",
pack->vol_count, k);
ret = dict_set_str (pack->dict, reconfig_key, v->data);
@@ -3468,10 +3483,13 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
glusterd_op_t cli_op = GD_OP_STATUS_VOLUME;
char err_str[2048] = {0,};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
GF_ASSERT (req);
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
@@ -3512,6 +3530,14 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
"Received status volume req for volume %s", volname);
}
+ if ((cmd & GF_CLI_STATUS_QUOTAD) &&
+ (conf->op_version == GD_OP_VERSION_MIN)) {
+ snprintf (err_str, sizeof (err_str), "The cluster is operating "
+ "at version 1. Getting the status of quotad is not "
+ "allowed in this state.");
+ ret = -1;
+ goto out;
+ }
ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
@@ -3810,6 +3836,7 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_peerctx_t *peerctx = NULL;
gf_boolean_t quorum_action = _gf_false;
+ uuid_t uuid;
peerctx = mydata;
if (!peerctx)
@@ -3851,6 +3878,13 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
glusterd_friend_remove_notify (peerctx);
goto out;
}
+ glusterd_get_lock_owner (&uuid);
+ if (!uuid_is_null (uuid) &&
+ !uuid_compare (peerinfo->uuid, uuid)) {
+ glusterd_unlock (peerinfo->uuid);
+ if (opinfo.state.state != GD_OP_STATE_DEFAULT)
+ opinfo.state.state = GD_OP_STATE_DEFAULT;
+ }
peerinfo->connected = 0;
break;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 14f03a9a56a..501be66f826 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -888,18 +888,24 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
if (cmd & GF_CLI_STATUS_ALL)
goto out;
+ if ((cmd & GF_CLI_STATUS_QUOTAD) &&
+ (priv->op_version == GD_OP_VERSION_MIN)) {
+ snprintf (msg, sizeof (msg), "The cluster is operating at "
+ "version 1. Getting the status of quotad is not "
+ "allowed in this state.");
+ ret = -1;
+ goto out;
+ }
+
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Unable to get volume name");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- snprintf (msg, sizeof(msg), "Volume %s does not exist",
- volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+ snprintf (msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
ret = -1;
goto out;
}
@@ -912,7 +918,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
if (!ret) {
snprintf (msg, sizeof (msg), "Volume %s is not started",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
ret = -1;
goto out;
}
@@ -927,7 +932,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof (msg),
"NFS server is disabled for volume %s",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
@@ -936,7 +940,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof (msg),
"Volume %s is not of type replicate",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
@@ -948,10 +951,15 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof (msg),
"Self-heal Daemon is disabled for volume %s",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
-
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ if (!glusterd_is_volume_quota_enabled (volinfo)) {
+ ret = -1;
+ snprintf (msg, sizeof (msg), "Volume %s does not have "
+ "quota enabled", volname);
+ goto out;
+ }
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_str (dict, "brick", &brick);
if (ret)
@@ -962,8 +970,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
if (ret) {
snprintf (msg, sizeof(msg), "No brick %s in"
" volume %s", brick, volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
-
ret = -1;
goto out;
}
@@ -979,7 +985,7 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
*op_errstr = gf_strdup ("Validation Failed for Status");
}
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning: %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG, "Returning: %d", ret);
return ret;
}
@@ -2216,6 +2222,14 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
other_count++;
node_count++;
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ ret = glusterd_add_node_to_dict ("quotad", rsp_dict, 0,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_str (dict, "brick", &brick);
if (ret)
@@ -2291,6 +2305,17 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
goto out;
other_count++;
node_count++;
+ other_index++;
+ }
+ if (glusterd_is_volume_quota_enabled (volinfo)) {
+ ret = glusterd_add_node_to_dict ("quotad",
+ rsp_dict,
+ other_index,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
}
}
}
@@ -4069,7 +4094,8 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr,
break;
case GD_OP_QUOTA:
- ret = glusterd_op_stage_quota (dict, op_errstr);
+ ret = glusterd_op_stage_quota (dict, op_errstr,
+ rsp_dict);
break;
case GD_OP_STATUS_VOLUME:
@@ -4942,9 +4968,6 @@ out:
return ret;
}
-
-
-
static int
glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
struct list_head *selected)
@@ -4984,6 +5007,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATUS_CALLPOOL:
case GF_CLI_STATUS_NFS:
case GF_CLI_STATUS_SHD:
+ case GF_CLI_STATUS_QUOTAD:
break;
default:
goto out;
@@ -5065,6 +5089,25 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
list_add_tail (&pending_node->list, selected);
ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ if (!glusterd_is_nodesvc_online ("quotad")) {
+ gf_log (this->name, GF_LOG_ERROR, "Quotad is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = priv->quotad;
+ pending_node->type = GD_NODE_QUOTAD;
+ pending_node->index = 0;
+ list_add_tail (&pending_node->list, selected);
+
+ ret = 0;
} else {
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
brick_index++;
diff --git a/xlators/mgmt/glusterd/src/glusterd-quota.c b/xlators/mgmt/glusterd/src/glusterd-quota.c
index cef2baf599c..56a24c74306 100644
--- a/xlators/mgmt/glusterd/src/glusterd-quota.c
+++ b/xlators/mgmt/glusterd/src/glusterd-quota.c
@@ -21,9 +21,27 @@
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "run.h"
+#include "syscall.h"
+#include "byte-order.h"
+#include "compat-errno.h"
#include <sys/wait.h>
+
+const char *gd_quota_op_list[GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT+1] = {
+ [GF_QUOTA_OPTION_TYPE_NONE] = "none",
+ [GF_QUOTA_OPTION_TYPE_ENABLE] = "enable",
+ [GF_QUOTA_OPTION_TYPE_DISABLE] = "disable",
+ [GF_QUOTA_OPTION_TYPE_LIMIT_USAGE] = "limit-usage",
+ [GF_QUOTA_OPTION_TYPE_REMOVE] = "remove",
+ [GF_QUOTA_OPTION_TYPE_LIST] = "list",
+ [GF_QUOTA_OPTION_TYPE_VERSION] = "version",
+ [GF_QUOTA_OPTION_TYPE_ALERT_TIME] = "alert-time",
+ [GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT] = "soft-timeout",
+ [GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT] = "hard-timeout",
+ [GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT] = "default-soft-limit",
+};
+
int
__glusterd_handle_quota (rpcsvc_request_t *req)
{
@@ -31,15 +49,17 @@ __glusterd_handle_quota (rpcsvc_request_t *req)
gf_cli_req cli_req = {{0,}};
dict_t *dict = NULL;
glusterd_op_t cli_op = GD_OP_QUOTA;
- char operation[256] = {0, };
char *volname = NULL;
int32_t type = 0;
char msg[2048] = {0,};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
GF_ASSERT (req);
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
@@ -82,23 +102,16 @@ __glusterd_handle_quota (rpcsvc_request_t *req)
goto out;
}
- switch (type) {
- case GF_QUOTA_OPTION_TYPE_ENABLE:
- strncpy (operation, "enable", sizeof (operation));
- break;
-
- case GF_QUOTA_OPTION_TYPE_DISABLE:
- strncpy (operation, "disable", sizeof (operation));
- break;
-
- case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE:
- strncpy (operation, "limit-usage", sizeof (operation));
- break;
+ if ((conf->op_version == GD_OP_VERSION_MIN) &&
+ (type > GF_QUOTA_OPTION_TYPE_VERSION)) {
+ snprintf (msg, sizeof (msg), "Cannot execute command. The "
+ "cluster is operating at version %d. Quota command %s "
+ "is unavailable in this version", conf->op_version,
+ gd_quota_op_list[type]);
+ ret = -1;
+ goto out;
+ }
- case GF_QUOTA_OPTION_TYPE_REMOVE:
- strncpy (operation, "remove", sizeof (operation));
- break;
- }
ret = glusterd_op_begin_synctask (req, GD_OP_QUOTA, dict);
out:
@@ -132,7 +145,6 @@ glusterd_check_if_quota_trans_enabled (glusterd_volinfo_t *volinfo)
}
if (flag == _gf_false) {
- gf_log ("", GF_LOG_ERROR, "first enable the quota translator");
ret = -1;
goto out;
}
@@ -141,13 +153,21 @@ out:
return ret;
}
-/* At the end of the function, the variable found will be set
+/* At the end of the function, the variable @found will be set
* to true if the path to be removed was present in the limit-list,
* else will be false.
+ *
+ * In addition, the function does the following things:
+ *
+ * a. places the path to be removed, if found, in @removed_path,
+ * b. places the new limit list formed after removing @path's entry, in
+ * @new_list. If @path is not found, the input limit string @quota_limits is
+ * dup'd as is and placed in @new_list.
*/
int32_t
-_glusterd_quota_remove_limits (char **quota_limits, char *path,
- gf_boolean_t *found)
+_glusterd_quota_remove_limits (char *quota_limits, char *path,
+ gf_boolean_t *found, char **new_list,
+ char **removed_path)
{
int ret = 0;
int i = 0;
@@ -158,14 +178,15 @@ _glusterd_quota_remove_limits (char **quota_limits, char *path,
int flag = 0;
char *limits = NULL;
char *qlimits = NULL;
+ char *rp = NULL;
if (found != NULL)
*found = _gf_false;
- if (*quota_limits == NULL)
+ if (quota_limits == NULL)
return -1;
- qlimits = *quota_limits;
+ qlimits = quota_limits;
pathlen = strlen (path);
@@ -192,6 +213,15 @@ _glusterd_quota_remove_limits (char **quota_limits, char *path,
} else {
skiplen = size + 1;
size = len - i - size;
+ if (removed_path) {
+ rp = GF_CALLOC (skiplen, sizeof (char), gf_gld_mt_char);
+ if (!rp) {
+ ret = -1;
+ goto out;
+ }
+ strncpy (rp, &qlimits[i], skiplen - 1);
+ *removed_path = rp;
+ }
memcpy ((void *) &limits [i], (void *) &qlimits [i + skiplen], size);
break;
}
@@ -200,42 +230,27 @@ _glusterd_quota_remove_limits (char **quota_limits, char *path,
size = 0;
}
- if (!flag) {
- ret = 1;
- } else {
- len = strlen (limits);
-
- if (len == 0) {
- GF_FREE (qlimits);
-
- *quota_limits = NULL;
-
- goto out;
- }
-
- if (limits[len - 1] == ',') {
- limits[len - 1] = '\0';
- len --;
- }
-
- GF_FREE (qlimits);
-
- qlimits = GF_CALLOC (len + 1, sizeof (char), gf_gld_mt_char);
-
- if (!qlimits) {
- ret = -1;
- goto out;
- }
-
- memcpy ((void *) qlimits, (void *) limits, len + 1);
+ len = strlen (limits);
+ if (len == 0)
+ goto out;
- *quota_limits = qlimits;
+ if (limits[len - 1] == ',') {
+ limits[len - 1] = '\0';
+ len --;
+ }
- ret = 0;
+ *new_list = GF_CALLOC (len + 1, sizeof (char), gf_gld_mt_char);
+ if (!*new_list) {
+ ret = -1;
+ goto out;
}
+ memcpy ((void *) *new_list, (void *) limits, len + 1);
+ ret = 0;
out:
GF_FREE (limits);
+ if (ret != -1)
+ ret = flag ? 0 : 1;
return ret;
}
@@ -381,46 +396,38 @@ _glusterd_quota_get_limit_usages (glusterd_volinfo_t *volinfo,
}
int32_t
-glusterd_quota_get_limit_usages (glusterd_conf_t *priv,
- glusterd_volinfo_t *volinfo,
- char *volname,
- dict_t *dict,
- char **op_errstr,
- dict_t *rsp_dict)
+glusterd_quota_get_default_soft_limit (glusterd_volinfo_t *volinfo,
+ dict_t *rsp_dict)
{
- int32_t i = 0;
- int32_t ret = 0;
- int32_t count = 0;
- char *path = NULL;
- char cmd_str [1024] = {0, };
- char *ret_str = NULL;
+ int32_t ret = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *default_limit = NULL;
+ char *val = NULL;
if (rsp_dict == NULL)
- return 0;
-
- ret = dict_get_int32 (dict, "count", &count);
- if (ret < 0)
- goto out;
+ return -1;
- if (count == 0) {
- ret_str = _glusterd_quota_get_limit_usages (volinfo, NULL,
- op_errstr);
- } else {
- i = 0;
- while (count--) {
- snprintf (cmd_str, 1024, "path%d", i++);
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
- ret = dict_get_str (dict, cmd_str, &path);
- if (ret < 0)
- goto out;
+ ret = glusterd_volinfo_get (volinfo, "features.default-soft-limit",
+ &default_limit);
+ if (default_limit)
+ val = gf_strdup (default_limit);
+ else
+ val = gf_strdup ("80%");
- ret_str = _glusterd_quota_get_limit_usages (volinfo, path, op_errstr);
- }
+ ret = dict_set_dynstr (rsp_dict, "default-soft-limit", val);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set default "
+ "soft-limit into dict");
+ goto out;
}
+ ret = 0;
- if (ret_str) {
- ret = dict_set_dynstr (rsp_dict, "limit_list", ret_str);
- }
out:
return ret;
}
@@ -431,54 +438,74 @@ glusterd_quota_enable (glusterd_volinfo_t *volinfo, char **op_errstr,
{
int32_t ret = -1;
char *quota_status = NULL;
+ xlator_t *this = NULL;
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- GF_VALIDATE_OR_GOTO ("glusterd", crawl, out);
- GF_VALIDATE_OR_GOTO ("glusterd", op_errstr, out);
+ this = THIS;
+ GF_ASSERT (this);
+
+ GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (this->name, crawl, out);
+ GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
if (glusterd_is_volume_started (volinfo) == 0) {
*op_errstr = gf_strdup ("Volume is stopped, start volume "
"to enable quota.");
+ ret = -1;
goto out;
}
ret = glusterd_check_if_quota_trans_enabled (volinfo);
if (ret == 0) {
*op_errstr = gf_strdup ("Quota is already enabled");
+ ret = -1;
goto out;
}
quota_status = gf_strdup ("on");
if (!quota_status) {
- gf_log ("", GF_LOG_ERROR, "memory allocation failed");
- *op_errstr = gf_strdup ("Enabling quota has been unsuccessful");
+ gf_log (this->name, GF_LOG_ERROR, "memory allocation failed");
+ ret = -1;
goto out;
}
- ret = dict_set_dynstr (volinfo->dict, VKEY_FEATURES_QUOTA, quota_status);
+ ret = dict_set_dynstr (volinfo->dict, VKEY_FEATURES_QUOTA,
+ quota_status);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "dict set failed");
- *op_errstr = gf_strdup ("Enabling quota has been unsuccessful");
+ gf_log (this->name, GF_LOG_ERROR, "dict set failed");
goto out;
}
- *op_errstr = gf_strdup ("Enabling quota has been successful");
-
*crawl = _gf_true;
ret = 0;
out:
+ if (ret && op_errstr && !*op_errstr)
+ gf_asprintf (op_errstr, "Enabling quota on volume %s has been "
+ "unsuccessful", volinfo->volname);
return ret;
}
int32_t
glusterd_quota_disable (glusterd_volinfo_t *volinfo, char **op_errstr)
{
- int32_t ret = -1;
- char *quota_status = NULL, *quota_limits = NULL;
+ int32_t ret = -1;
+ int i = 0;
+ char *quota_status = NULL;
+ char *value = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *quota_options[] = {"features.soft-timeout",
+ "features.hard-timeout",
+ "features.alert-time",
+ "features.default-soft-limit", NULL};
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- GF_VALIDATE_OR_GOTO ("glusterd", op_errstr, out);
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
ret = glusterd_check_if_quota_trans_enabled (volinfo);
if (ret == -1) {
@@ -488,47 +515,345 @@ glusterd_quota_disable (glusterd_volinfo_t *volinfo, char **op_errstr)
quota_status = gf_strdup ("off");
if (!quota_status) {
- gf_log ("", GF_LOG_ERROR, "memory allocation failed");
- *op_errstr = gf_strdup ("Disabling quota has been unsuccessful");
+ gf_log (this->name, GF_LOG_ERROR, "memory allocation failed");
+ ret = -1;
goto out;
}
ret = dict_set_dynstr (volinfo->dict, VKEY_FEATURES_QUOTA, quota_status);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "dict set failed");
- *op_errstr = gf_strdup ("Disabling quota has been unsuccessful");
+ gf_log (this->name, GF_LOG_ERROR, "dict set failed");
goto out;
}
- *op_errstr = gf_strdup ("Disabling quota has been successful");
+ for (i = 0; quota_options [i]; i++) {
+ ret = glusterd_volinfo_get (volinfo, quota_options[i], &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_INFO, "failed to get option"
+ " %s",
+ quota_options[i]);
+ } else {
+ dict_del (volinfo->dict, quota_options[i]);
+ }
+ }
- ret = glusterd_volinfo_get (volinfo, VKEY_FEATURES_LIMIT_USAGE,
- &quota_limits);
+ //Remove aux mount of the volume on every node in the cluster
+ ret = glusterd_remove_auxiliary_mount (volinfo->volname);
+ if (ret)
+ goto out;
+
+ (void) glusterd_clean_up_quota_store (volinfo);
+
+ ret = 0;
+out:
+ if (ret && op_errstr && !*op_errstr)
+ gf_asprintf (op_errstr, "Disabling quota on volume %s has been "
+ "unsuccessful", volinfo->volname);
+ return ret;
+}
+
+static int
+glusterd_set_quota_limit (char *volname, char *path, char *hard_limit,
+ char *soft_limit, char **op_errstr)
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+ char abspath[PATH_MAX] = {0,};
+ glusterd_conf_t *priv = NULL;
+ double soft_lim = 0;
+
+ typedef struct quota_limits {
+ int64_t hl;
+ int64_t sl;
+ } __attribute__ ((__packed__)) quota_limits_t;
+
+ quota_limits_t existing_limit = {0,};
+ quota_limits_t new_limit = {0,};
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ snprintf (abspath, sizeof (abspath)-1, "/tmp/%s%s", volname, path);
+
+ ret = gf_lstat_dir (abspath, NULL);
if (ret) {
- gf_log ("", GF_LOG_WARNING, "failed to get the quota limits");
+ gf_asprintf (op_errstr, "Failed to find the directory %s. "
+ "Reason : %s", abspath, strerror (errno));
+ goto out;
+ }
+
+ if (!soft_limit) {
+ ret = sys_lgetxattr (abspath,
+ "trusted.glusterfs.quota.limit-set",
+ (void *)&existing_limit,
+ sizeof (existing_limit));
+ if (ret < 0) {
+ switch (errno) {
+ case ENOATTR:
+ existing_limit.sl = -1;
+ break;
+ default:
+ gf_asprintf (op_errstr, "Failed to get the xattr "
+ "'trusted.glusterfs.quota.limit-set' from "
+ "%s. Reason : %s", abspath,
+ strerror (errno));
+ goto out;
+ }
+ } else {
+ existing_limit.hl = ntoh64 (existing_limit.hl);
+ existing_limit.sl = ntoh64 (existing_limit.sl);
+ }
+ new_limit.sl = existing_limit.sl;
+
} else {
- GF_FREE (quota_limits);
+ ret = gf_string2percent (soft_limit, &soft_lim);
+ if (ret)
+ goto out;
+ new_limit.sl = soft_lim;
}
- dict_del (volinfo->dict, VKEY_FEATURES_LIMIT_USAGE);
+ new_limit.sl = hton64 (new_limit.sl);
+
+ ret = gf_string2bytesize (hard_limit, (uint64_t*)&new_limit.hl);
+ if (ret)
+ goto out;
+
+ new_limit.hl = hton64 (new_limit.hl);
+
+ ret = sys_lsetxattr (abspath, "trusted.glusterfs.quota.limit-set",
+ (char *)(void *)&new_limit, sizeof (new_limit), 0);
+ if (ret) {
+ gf_asprintf (op_errstr, "setxattr of "
+ "'trusted.glusterfs.quota.limit-set' failed on %s."
+ " Reason : %s", abspath, strerror (errno));
+ goto out;
+ }
+ ret = 0;
out:
return ret;
}
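/*
 * A minimal standalone sketch of the value written above: the
 * "trusted.glusterfs.quota.limit-set" xattr is a packed pair of 64-bit
 * integers in network byte order, the hard limit in bytes followed by the
 * soft limit percentage (or -1 when unset). The use of <endian.h>/be64toh()
 * is an assumption for illustration; the patch itself converts with
 * ntoh64()/hton64(). Run against a backend directory that has a limit set.
 */
#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/xattr.h>

int
main (int argc, char *argv[])
{
        struct {
                int64_t hl;
                int64_t sl;
        } __attribute__ ((__packed__)) lim = {0, 0};
        ssize_t len = 0;

        if (argc != 2) {
                fprintf (stderr, "usage: %s <backend-directory>\n", argv[0]);
                return 1;
        }

        len = lgetxattr (argv[1], "trusted.glusterfs.quota.limit-set",
                         &lim, sizeof (lim));
        if (len != (ssize_t) sizeof (lim)) {
                fprintf (stderr, "could not read limit-set xattr from %s\n",
                         argv[1]);
                return 1;
        }

        /* both fields are stored big-endian, as written by hton64 () above */
        printf ("hard limit: %" PRId64 " bytes, soft limit: %" PRId64 "%%\n",
                (int64_t) be64toh ((uint64_t) lim.hl),
                (int64_t) be64toh ((uint64_t) lim.sl));
        return 0;
}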
+static int
+glusterd_update_quota_conf_version (glusterd_volinfo_t *volinfo)
+{
+ volinfo->quota_conf_version++;
+ return 0;
+}
+
+static int
+glusterd_store_quota_config (glusterd_volinfo_t *volinfo, char *path,
+ char *gfid_str, int opcode, char **op_errstr)
+{
+ int ret = -1;
+ int count = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ unsigned char buf[16] = {0,};
+ int fd = -1;
+ int conf_fd = -1;
+ size_t entry_sz = 16;
+ uuid_t gfid = {0,};
+ gf_boolean_t found = _gf_false;
+ gf_boolean_t modified = _gf_false;
+
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ uuid_parse (gfid_str, gfid);
+
+ glusterd_store_create_quota_conf_sh_on_absence (volinfo);
+
+ fd = gf_store_mkstemp (volinfo->quota_conf_shandle);
+ if (fd < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ conf_fd = open (volinfo->quota_conf_shandle->path, O_RDONLY);
+ if (conf_fd == -1) {
+ ret = -1;
+ goto out;
+ }
+
+
+ ret = glusterd_store_quota_conf_skip_header (this, conf_fd);
+ if (ret) {
+ goto out;
+ }
+
+ ret = glusterd_store_quota_conf_stamp_header (this, fd);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to add header to tmp "
+ "file.");
+ goto out;
+ }
+ //gfid is stored as 16 bytes of 'raw' data
+ entry_sz = 16;
+ for (;;) {
+ ret = read (conf_fd, (void*)&buf, entry_sz) ;
+ if (ret <= 0) {
+ //Finished reading all entries in the conf file
+ break;
+ }
+ if (ret != 16) {
+ //This should never happen. We must have a multiple of
+ //entry_sz bytes in our configuration file.
+ gf_log (this->name, GF_LOG_CRITICAL, "Quota "
+ "configuration store may be corrupt.");
+ ret = -1;
+ goto out;
+ }
+ count++;
+ if (uuid_compare (gfid, buf)) {
+ /*If the gfids don't match, write @buf into tmp file. */
+ ret = write (fd, (void*) buf, entry_sz);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "write %s into quota configuration.",
+ uuid_utoa (buf));
+ goto out;
+ }
+ } else {
+ /*If a match is found, write @buf into tmp file for
+ * limit-usage only.
+ */
+ if (opcode == GF_QUOTA_OPTION_TYPE_LIMIT_USAGE) {
+ ret = write (fd, (void *) buf, entry_sz);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to write %s into quota "
+ "configuration.",
+ uuid_utoa (buf));
+ goto out;
+ }
+ }
+ found = _gf_true;
+ }
+ }
+
+ switch (opcode) {
+ case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE:
+ /*
+ * count = 0 implies that the conf file is empty.
+                         * In this case, we go ahead and write the gfid
+                         * into the tmp file.
+                         * If count is non-zero and found is false, a limit
+                         * is being set on this gfid for the first time, so
+                         * the gfid is appended to the end of the file.
+ */
+ if ((count == 0) ||
+ ((count > 0) && (found == _gf_false))) {
+ memcpy (buf, gfid, 16);
+ ret = write (fd, (void *) buf, entry_sz);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to write %s into quota "
+ "configuration.",
+ uuid_utoa (buf));
+ goto out;
+ }
+ modified = _gf_true;
+ }
+
+ break;
+
+ case GF_QUOTA_OPTION_TYPE_REMOVE:
+ /*
+ * count = 0 is not a valid scenario and must be treated
+ * as error.
+ * If count is non-zero and found is false, then it is
+ * an error.
+ * If count is non-zero and found is true, take no
+ * action, by virtue of which the gfid is as good as
+ * deleted from the store.
+ */
+ if (count == 0) {
+ gf_asprintf (op_errstr, "Cannot remove limit on"
+ " %s. The quota configuration file"
+ " for volume %s is empty.", path,
+ volinfo->volname);
+ ret = -1;
+ goto out;
+ } else {
+ if (!found) {
+ gf_asprintf (op_errstr, "Error. gfid %s"
+ " for path %s not found in"
+ " store", gfid_str, path);
+ ret = -1;
+ goto out;
+ } else {
+ modified = _gf_true;
+ }
+
+ }
+ break;
+
+ default:
+ ret = 0;
+ break;
+ }
+
+ if (modified)
+ glusterd_update_quota_conf_version (volinfo);
+
+ ret = 0;
+out:
+ if (conf_fd != -1) {
+ close (conf_fd);
+ }
+
+ if (fd != -1) {
+ close (fd);
+ }
+
+ if (ret && (fd > 0)) {
+ gf_store_unlink_tmppath (volinfo->quota_conf_shandle);
+ } else if (!ret) {
+ ret = gf_store_rename_tmppath (volinfo->quota_conf_shandle);
+ if (modified) {
+ ret = glusterd_compute_cksum (volinfo, _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "compute cksum for quota conf file");
+ goto out;
+ }
+
+ ret = glusterd_store_save_quota_version_and_cksum
+ (volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "store quota version and cksum");
+ goto out;
+ }
+ }
+ }
+
+ return ret;
+}
+
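/*
 * A minimal standalone sketch of the quota.conf layout handled above: a
 * header, skipped in the patch by glusterd_store_quota_conf_skip_header()
 * and whose exact size is not shown in this hunk (HEADER_LEN below is an
 * assumption), followed by a flat sequence of 16-byte raw gfids, one per
 * configured limit. Link with -luuid for uuid_unparse().
 */
#include <stdio.h>
#include <uuid/uuid.h>

#define HEADER_LEN 16   /* assumed header size; see the skip-header helper */

int
main (int argc, char *argv[])
{
        FILE          *fp = NULL;
        unsigned char  buf[16] = {0};
        char           gfid_str[37] = {0};
        size_t         n = 0;

        if (argc != 2) {
                fprintf (stderr, "usage: %s <quota.conf>\n", argv[0]);
                return 1;
        }

        fp = fopen (argv[1], "rb");
        if (!fp) {
                perror ("fopen");
                return 1;
        }

        fseek (fp, HEADER_LEN, SEEK_SET);   /* skip the header */

        /* every complete 16-byte record is one gfid carrying a limit */
        while ((n = fread (buf, 1, sizeof (buf), fp)) == sizeof (buf)) {
                uuid_unparse (buf, gfid_str);
                printf ("%s\n", gfid_str);
        }

        if (n != 0)
                fprintf (stderr, "trailing %zu bytes: store may be corrupt\n",
                         n);

        fclose (fp);
        return 0;
}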
int32_t
-glusterd_quota_limit_usage (glusterd_volinfo_t *volinfo, dict_t *dict, char **op_errstr)
+glusterd_quota_limit_usage (glusterd_volinfo_t *volinfo, dict_t *dict,
+ int opcode, char **op_errstr)
{
- int32_t ret = -1;
- char *path = NULL;
- char *limit = NULL;
- char *value = NULL;
- char msg [1024] = {0,};
- char *quota_limits = NULL;
+ int32_t ret = -1;
+ char *path = NULL;
+ char *hard_limit = NULL;
+ char *soft_limit = NULL;
+ char *gfid_str = NULL;
+ xlator_t *this = NULL;
- GF_VALIDATE_OR_GOTO ("glusterd", dict, out);
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- GF_VALIDATE_OR_GOTO ("glusterd", op_errstr, out);
+ this = THIS;
+ GF_ASSERT (this);
+
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+ GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
ret = glusterd_check_if_quota_trans_enabled (volinfo);
if (ret == -1) {
@@ -537,141 +862,217 @@ glusterd_quota_limit_usage (glusterd_volinfo_t *volinfo, dict_t *dict, char **op
goto out;
}
- ret = glusterd_volinfo_get (volinfo, VKEY_FEATURES_LIMIT_USAGE,
- &quota_limits);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "failed to get the quota limits");
- *op_errstr = gf_strdup ("failed to set limit");
- goto out;
- }
-
ret = dict_get_str (dict, "path", &path);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to fetch quota limits" );
- *op_errstr = gf_strdup ("failed to set limit");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch path");
goto out;
}
+ ret = gf_canonicalize_path (path);
+ if (ret)
+ goto out;
- ret = dict_get_str (dict, "limit", &limit);
+ ret = dict_get_str (dict, "hard-limit", &hard_limit);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to fetch quota limits" );
- *op_errstr = gf_strdup ("failed to set limit");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch hard limit");
goto out;
}
- if (quota_limits) {
- ret = _glusterd_quota_remove_limits (&quota_limits, path, NULL);
- if (ret == -1) {
- gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
- *op_errstr = gf_strdup ("failed to set limit");
+ if (dict_get (dict, "soft-limit")) {
+ ret = dict_get_str (dict, "soft-limit", &soft_limit);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch "
+ "soft limit");
goto out;
}
}
- if (quota_limits == NULL) {
- ret = gf_asprintf (&value, "%s:%s", path, limit);
- if (ret == -1) {
- gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
- *op_errstr = gf_strdup ("failed to set limit");
- goto out;
- }
- } else {
- ret = gf_asprintf (&value, "%s,%s:%s",
- quota_limits, path, limit);
- if (ret == -1) {
- gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
- *op_errstr = gf_strdup ("failed to set limit");
+ if (is_origin_glusterd ()) {
+ ret = glusterd_set_quota_limit (volinfo->volname, path,
+ hard_limit, soft_limit,
+ op_errstr);
+ if (ret)
goto out;
- }
+ }
- GF_FREE (quota_limits);
+ ret = dict_get_str (dict, "gfid", &gfid_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get gfid of path "
+ "%s", path);
+ goto out;
}
- quota_limits = value;
+ ret = glusterd_store_quota_config (volinfo, path, gfid_str, opcode,
+ op_errstr);
+ if (ret)
+ goto out;
+
+ ret = 0;
+out:
- ret = dict_set_str (volinfo->dict, VKEY_FEATURES_LIMIT_USAGE,
- quota_limits);
+ if (ret && op_errstr && !*op_errstr)
+ gf_asprintf (op_errstr, "Failed to set hard limit on path %s "
+ "for volume %s", path, volinfo->volname);
+ return ret;
+}
+
+static int
+glusterd_remove_quota_limit (char *volname, char *path, char **op_errstr)
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+ char abspath[PATH_MAX] = {0,};
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ snprintf (abspath, sizeof (abspath)-1, "/tmp/%s%s", volname, path);
+
+ ret = gf_lstat_dir (abspath, NULL);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set quota limits" );
- *op_errstr = gf_strdup ("failed to set limit");
+ gf_asprintf (op_errstr, "Failed to find the directory %s. "
+ "Reason : %s", abspath, strerror (errno));
goto out;
}
- snprintf (msg, 1024, "limit set on %s", path);
- *op_errstr = gf_strdup (msg);
+ ret = sys_lremovexattr (abspath, "trusted.glusterfs.quota.limit-set");
+ if (ret) {
+ gf_asprintf (op_errstr, "removexattr failed on %s. Reason : %s",
+ abspath, strerror (errno));
+ goto out;
+ }
ret = 0;
+
out:
return ret;
}
int32_t
-glusterd_quota_remove_limits (glusterd_volinfo_t *volinfo, dict_t *dict, char **op_errstr)
+glusterd_quota_remove_limits (glusterd_volinfo_t *volinfo, dict_t *dict,
+ int opcode, char **op_errstr)
{
int32_t ret = -1;
- char str [PATH_MAX + 1024] = {0,};
- char *quota_limits = NULL;
char *path = NULL;
- gf_boolean_t flag = _gf_false;
+ char *gfid_str = NULL;
+ xlator_t *this = NULL;
- GF_VALIDATE_OR_GOTO ("glusterd", dict, out);
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- GF_VALIDATE_OR_GOTO ("glusterd", op_errstr, out);
+ this = THIS;
+ GF_ASSERT (this);
+
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+ GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
ret = glusterd_check_if_quota_trans_enabled (volinfo);
if (ret == -1) {
- *op_errstr = gf_strdup ("Quota is disabled, please enable quota");
+ *op_errstr = gf_strdup ("Quota is disabled, please enable "
+ "quota");
goto out;
}
- ret = glusterd_volinfo_get (volinfo, VKEY_FEATURES_LIMIT_USAGE,
- &quota_limits);
+ ret = dict_get_str (dict, "path", &path);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "failed to get the quota limits");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch path");
goto out;
}
- ret = dict_get_str (dict, "path", &path);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to fetch quota limits" );
+ ret = gf_canonicalize_path (path);
+ if (ret)
goto out;
+
+ if (is_origin_glusterd ()) {
+ ret = glusterd_remove_quota_limit (volinfo->volname, path,
+ op_errstr);
+ if (ret)
+ goto out;
}
- ret = _glusterd_quota_remove_limits (&quota_limits, path, &flag);
- if (ret == -1) {
- if (flag == _gf_true)
- snprintf (str, sizeof (str), "Removing limit on %s has "
- "been unsuccessful", path);
- else
- snprintf (str, sizeof (str), "%s has no limit set", path);
- *op_errstr = gf_strdup (str);
+ ret = dict_get_str (dict, "gfid", &gfid_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get gfid of path "
+ "%s", path);
goto out;
- } else {
- if (flag == _gf_true)
- snprintf (str, sizeof (str), "Removed quota limit on "
- "%s", path);
- else
- snprintf (str, sizeof (str), "no limit set on %s",
- path);
- *op_errstr = gf_strdup (str);
- }
-
- if (quota_limits) {
- ret = dict_set_str (volinfo->dict, VKEY_FEATURES_LIMIT_USAGE,
- quota_limits);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set quota limits" );
- goto out;
- }
- } else {
- dict_del (volinfo->dict, VKEY_FEATURES_LIMIT_USAGE);
}
+ ret = glusterd_store_quota_config (volinfo, path, gfid_str, opcode,
+ op_errstr);
+ if (ret)
+ goto out;
+
+
ret = 0;
out:
return ret;
}
+int
+glusterd_set_quota_option (glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *key, char **op_errstr)
+{
+ int ret = 0;
+ char *value = NULL;
+ xlator_t *this = NULL;
+ char *option = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = glusterd_check_if_quota_trans_enabled (volinfo);
+ if (ret == -1) {
+ gf_asprintf (op_errstr, "Cannot set %s. Quota on volume %s is "
+ "disabled", key, volinfo->volname);
+ return -1;
+ }
+
+ ret = dict_get_str (dict, "value", &value);
+ if(ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Option value absent.");
+ return -1;
+ }
+
+ option = gf_strdup (value);
+ ret = dict_set_dynstr (volinfo->dict, key, option);
+ if(ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set option %s",
+ key);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+glusterd_quotad_op (int opcode)
+{
+ int ret = -1;
+
+ switch (opcode) {
+ case GF_QUOTA_OPTION_TYPE_ENABLE:
+ case GF_QUOTA_OPTION_TYPE_DISABLE:
+
+ if (glusterd_all_volumes_with_quota_stopped ())
+ ret = glusterd_quotad_stop ();
+ else
+ ret = glusterd_check_generate_start_quotad ();
+ break;
+
+ case GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT:
+ case GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT:
+ case GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT:
+ case GF_QUOTA_OPTION_TYPE_ALERT_TIME:
+
+ ret = glusterd_reconfigure_quotad ();
+ break;
+
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
int
glusterd_op_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
@@ -682,76 +1083,120 @@ glusterd_op_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
int type = -1;
gf_boolean_t start_crawl = _gf_false;
glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
GF_ASSERT (dict);
GF_ASSERT (op_errstr);
- priv = THIS->private;
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get volume name " );
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_asprintf (op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
goto out;
}
ret = dict_get_int32 (dict, "type", &type);
- if (type == GF_QUOTA_OPTION_TYPE_ENABLE) {
- ret = glusterd_quota_enable (volinfo, op_errstr, &start_crawl);
- if (ret < 0)
- goto out;
-
- goto create_vol;
+ if ((priv->op_version == GD_OP_VERSION_MIN) &&
+ (type > GF_QUOTA_OPTION_TYPE_VERSION)) {
+ gf_asprintf (op_errstr, "Volume quota failed. The cluster is "
+ "operating at version %d. Quota command"
+ " %s is unavailable in this version.",
+ priv->op_version,
+ gd_quota_op_list[type]);
+ ret = -1;
+ goto out;
}
- if (type == GF_QUOTA_OPTION_TYPE_DISABLE) {
- ret = glusterd_quota_disable (volinfo, op_errstr);
- if (ret < 0)
- goto out;
+ switch (type) {
+ case GF_QUOTA_OPTION_TYPE_ENABLE:
+ ret = glusterd_quota_enable (volinfo, op_errstr,
+ &start_crawl);
+ if (ret < 0)
+ goto out;
+ break;
- goto create_vol;
- }
+ case GF_QUOTA_OPTION_TYPE_DISABLE:
+ ret = glusterd_quota_disable (volinfo, op_errstr);
+ if (ret < 0)
+ goto out;
- if (type == GF_QUOTA_OPTION_TYPE_LIMIT_USAGE) {
- ret = glusterd_quota_limit_usage (volinfo, dict, op_errstr);
- if (ret < 0)
+ break;
+
+ case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE:
+ ret = glusterd_quota_limit_usage (volinfo, dict, type,
+ op_errstr);
goto out;
- goto create_vol;
- }
+ case GF_QUOTA_OPTION_TYPE_REMOVE:
+ ret = glusterd_quota_remove_limits (volinfo, dict, type,
+ op_errstr);
+ goto out;
- if (type == GF_QUOTA_OPTION_TYPE_REMOVE) {
- ret = glusterd_quota_remove_limits (volinfo, dict, op_errstr);
- if (ret < 0)
+ case GF_QUOTA_OPTION_TYPE_LIST:
+ ret = glusterd_check_if_quota_trans_enabled (volinfo);
+ if (ret == -1) {
+ *op_errstr = gf_strdup ("Cannot list limits, "
+ "quota is disabled");
+ goto out;
+ }
+ ret = glusterd_quota_get_default_soft_limit (volinfo,
+ rsp_dict);
goto out;
- goto create_vol;
- }
+ case GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT:
+ ret = glusterd_set_quota_option (volinfo, dict,
+ "features.soft-timeout",
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
- if (type == GF_QUOTA_OPTION_TYPE_LIST) {
- ret = glusterd_check_if_quota_trans_enabled (volinfo);
- if (ret == -1) {
- *op_errstr = gf_strdup ("cannot list the limits, "
- "quota is disabled");
- goto out;
- }
+ case GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT:
+ ret = glusterd_set_quota_option (volinfo, dict,
+ "features.hard-timeout",
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
- ret = glusterd_quota_get_limit_usages (priv, volinfo, volname,
- dict, op_errstr, rsp_dict);
+ case GF_QUOTA_OPTION_TYPE_ALERT_TIME:
+ ret = glusterd_set_quota_option (volinfo, dict,
+ "features.alert-time",
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
- goto out;
+ case GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT:
+ ret = glusterd_set_quota_option (volinfo, dict,
+ "features.default-soft-limit",
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
+
+ default:
+ gf_asprintf (op_errstr, "Quota command failed. Invalid "
+ "opcode");
+ ret = -1;
+ goto out;
}
-create_vol:
+
ret = glusterd_create_volfiles_and_notify_services (volinfo);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to re-create volfile for"
- " 'quota'");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to re-create "
+ "volfiles");
ret = -1;
goto out;
}
@@ -760,81 +1205,262 @@ create_vol:
if (ret)
goto out;
- if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_check_generate_start_nfs ();
-
- ret = 0;
+ if (GLUSTERD_STATUS_STARTED == volinfo->status) {
+ if (priv->op_version == GD_OP_VERSION_MIN)
+ ret = glusterd_check_generate_start_nfs ();
+ }
-out:
if (rsp_dict && start_crawl == _gf_true)
glusterd_quota_initiate_fs_crawl (priv, volname);
- if (rsp_dict && *op_errstr) {
- ret = dict_set_dynstr (rsp_dict, "errstr", *op_errstr);
+ if (priv->op_version > GD_OP_VERSION_MIN) {
+ ret = glusterd_quotad_op (type);
+ if (ret)
+ goto out;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+/*
+ * glusterd_get_gfid_from_brick() fetches the 'trusted.gfid' attribute of @path
+ * from each of this node's bricks and places them in rsp_dict under the
+ * keys gfid0, gfid1, gfid2 and so on. The absence of @path on a brick
+ * is not treated as an error.
+ */
+static int
+glusterd_get_gfid_from_brick (dict_t *dict, glusterd_volinfo_t *volinfo,
+ dict_t *rsp_dict, char **op_errstr)
+{
+ int ret = -1;
+ int count = 0;
+ char *path = NULL;
+ char backend_path[PATH_MAX] = {0,};
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ char key[256] = {0,};
+ char *gfid_str = NULL;
+ uuid_t gfid;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_get_str (dict, "path", &path);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get path");
+ goto out;
+ }
+
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ ret = glusterd_resolve_brick (brickinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, FMTSTR_RESOLVE_BRICK,
+ brickinfo->hostname, brickinfo->path);
+ goto out;
+ }
+
+ if (uuid_compare (brickinfo->uuid, MY_UUID))
+ continue;
+
+ if (brickinfo->vg[0])
+ continue;
+
+ snprintf (backend_path, sizeof (backend_path), "%s%s",
+ brickinfo->path, path);
+
+ ret = gf_lstat_dir (backend_path, NULL);
+ if (ret) {
+ gf_log (this->name, GF_LOG_INFO, "Failed to find "
+ "directory %s. Reason : %s", backend_path,
+ strerror (errno));
+ ret = 0;
+ continue;
+ }
+ ret = sys_lgetxattr (backend_path, GFID_XATTR_KEY, gfid, 16);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_INFO, "Failed to get "
+ "extended attribute %s for directory %s. "
+ "Reason : %s", GFID_XATTR_KEY, backend_path,
+ strerror (errno));
+ ret = 0;
+ continue;
+ }
+ snprintf (key, sizeof (key), "gfid%d", count);
+
+ gfid_str = gf_strdup (uuid_utoa (gfid));
+ if (!gfid_str) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (rsp_dict, key, gfid_str);
if (ret) {
- GF_FREE (*op_errstr);
- gf_log ("", GF_LOG_DEBUG,
- "failed to set error message in ctx");
+ gf_log (this->name, GF_LOG_ERROR, "Failed to place "
+ "gfid of %s in dict", backend_path);
+ GF_FREE (gfid_str);
+ goto out;
}
- *op_errstr = NULL;
+ count++;
+ }
+
+ ret = dict_set_int32 (rsp_dict, "count", count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set count");
+ goto out;
}
+ ret = 0;
+out:
return ret;
}
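/*
 * A minimal standalone sketch of the per-brick lookup above: the gfid of a
 * directory is read straight off the brick's backend path as a 16-byte
 * extended attribute. The literal "trusted.gfid" name is taken from the
 * comment above glusterd_get_gfid_from_brick(); the patch itself uses the
 * GFID_XATTR_KEY macro. Link with -luuid.
 */
#include <stdio.h>
#include <sys/xattr.h>
#include <uuid/uuid.h>

int
main (int argc, char *argv[])
{
        unsigned char gfid[16] = {0};
        char          gfid_str[37] = {0};
        ssize_t       len = 0;

        if (argc != 2) {
                fprintf (stderr, "usage: %s <brick-path>/<dir>\n", argv[0]);
                return 1;
        }

        len = lgetxattr (argv[1], "trusted.gfid", gfid, sizeof (gfid));
        if (len != (ssize_t) sizeof (gfid)) {
                fprintf (stderr, "could not read trusted.gfid from %s\n",
                         argv[1]);
                return 1;
        }

        uuid_unparse (gfid, gfid_str);
        printf ("%s\n", gfid_str);
        return 0;
}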
int
-glusterd_op_stage_quota (dict_t *dict, char **op_errstr)
+glusterd_op_stage_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- int ret = 0;
- char *volname = NULL;
- gf_boolean_t exists = _gf_false;
- int type = 0;
- dict_t *ctx = NULL;
+ int ret = 0;
+ int type = 0;
+ int i = 0;
+ char *volname = NULL;
+ char *value = NULL;
+ gf_boolean_t exists = _gf_false;
+ dict_t *ctx = NULL;
+ dict_t *tmp_dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ struct {
+ int opcode;
+ char *key;
+ } optable[] = {
+ {GF_QUOTA_OPTION_TYPE_ALERT_TIME,
+ "features.alert-time"},
+ {GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT, "features.soft-timeout"},
+ {GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT, "features.hard-timeout"},
+ {GF_QUOTA_OPTION_TYPE_NONE, NULL}
+ };
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (dict);
GF_ASSERT (op_errstr);
+ tmp_dict = dict_new ();
+ if (!tmp_dict)
+ goto out;
+
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get volume name");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
goto out;
}
exists = glusterd_check_volume_exists (volname);
if (!exists) {
- gf_log ("", GF_LOG_ERROR, "Volume with name: %s "
- "does not exist",
- volname);
- *op_errstr = gf_strdup ("Invalid volume name");
+ gf_asprintf (op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
+ ret = -1;
+ goto out;
+ }
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ gf_asprintf (op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+
+ if (!glusterd_is_volume_started (volinfo)) {
+ *op_errstr = gf_strdup ("Volume is stopped, start volume "
+ "before executing quota command.");
ret = -1;
goto out;
}
ret = dict_get_int32 (dict, "type", &type);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get 'type' for quota op");
- *op_errstr = gf_strdup ("Volume quota failed, internal error "
- ", unable to get type of operation");
+ *op_errstr = gf_strdup ("Volume quota failed, internal error, "
+ "unable to get type of operation");
goto out;
}
+ if ((!glusterd_is_volume_quota_enabled (volinfo)) &&
+ (type != GF_QUOTA_OPTION_TYPE_ENABLE)) {
+ *op_errstr = gf_strdup ("Quota is disabled, please enable "
+ "quota");
+ ret = -1;
+ goto out;
+ }
- ctx = glusterd_op_get_ctx();
- if (ctx && (type == GF_QUOTA_OPTION_TYPE_ENABLE
- || type == GF_QUOTA_OPTION_TYPE_LIST)) {
- /* Fuse mount req. only for enable & list-usage options*/
- if (!glusterd_is_fuse_available ()) {
- gf_log ("glusterd", GF_LOG_ERROR, "Unable to open /dev/"
- "fuse (%s), quota command failed",
- strerror (errno));
- *op_errstr = gf_strdup ("Fuse unavailable");
- ret = -1;
- goto out;
- }
+ if ((priv->op_version == GD_OP_VERSION_MIN) &&
+ (type > GF_QUOTA_OPTION_TYPE_VERSION)) {
+ gf_asprintf (op_errstr, "Volume quota failed. The cluster is "
+ "operating at version %d. Quota command"
+ " %s is unavailable in this version.",
+ priv->op_version,
+ gd_quota_op_list[type]);
+ ret = -1;
+ goto out;
}
-out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ ctx = glusterd_op_get_ctx();
+ if (ctx && (type == GF_QUOTA_OPTION_TYPE_ENABLE
+ || type == GF_QUOTA_OPTION_TYPE_LIST)) {
+ /* Fuse mount req. only for enable & list-usage options*/
+ if (!glusterd_is_fuse_available ()) {
+ *op_errstr = gf_strdup ("Fuse unavailable");
+ ret = -1;
+ goto out;
+ }
+ }
- return ret;
+ switch (type) {
+ case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE:
+ case GF_QUOTA_OPTION_TYPE_REMOVE:
+ ret = glusterd_get_gfid_from_brick (dict, volinfo,
+ rsp_dict,
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
+
+ case GF_QUOTA_OPTION_TYPE_ALERT_TIME:
+ case GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT:
+ case GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT:
+ ret = dict_get_str (dict, "value", &value);
+ if (ret)
+ goto out;
+
+ for (i = 0; optable[i].key; i++) {
+ if (type == optable[i].opcode)
+ break;
+ }
+ ret = dict_set_str (tmp_dict, optable[i].key, value);
+ if (ret)
+ goto out;
+
+ ret = glusterd_validate_reconfopts (volinfo, tmp_dict,
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
+
+ default:
+ ret = 0;
+ }
+
+ ret = 0;
+
+ out:
+ if (tmp_dict)
+ dict_unref (tmp_dict);
+ if (ret && op_errstr && *op_errstr)
+ gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 214f40c668a..1b6ee293ae3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -141,7 +141,7 @@ __glusterd_defrag_notify (struct rpc_clnt *rpc, void *mydata,
}
UNLOCK (&defrag->lock);
- if (!glusterd_is_service_running (pidfile, NULL)) {
+ if (!gf_is_service_running (pidfile, NULL)) {
if (volinfo->rebal.defrag_status ==
GF_DEFRAG_STATUS_STARTED) {
volinfo->rebal.defrag_status =
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index 29f6a0e0f7c..94b0383fec4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -450,7 +450,7 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, src_brickinfo,
priv);
if ((replace_op != GF_REPLACE_OP_COMMIT_FORCE) &&
- !glusterd_is_service_running (pidfile, NULL)) {
+ !gf_is_service_running (pidfile, NULL)) {
snprintf(msg, sizeof(msg), "Source brick %s:%s "
"is not online.", src_brickinfo->hostname,
src_brickinfo->path);
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 38714a5ebf9..cd81383e921 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -96,13 +96,6 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
break;
}
- case GD_OP_QUOTA:
- {
- if (ctx && !op_errstr) {
- ret = dict_get_str (ctx, "errstr", &errstr);
- }
- break;
- }
case GD_OP_PROFILE_VOLUME:
{
if (ctx && dict_get_int32 (ctx, "count", &count)) {
@@ -142,6 +135,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
case GD_OP_LIST_VOLUME:
case GD_OP_CLEARLOCKS_VOLUME:
case GD_OP_HEAL_VOLUME:
+ case GD_OP_QUOTA:
{
/*nothing specific to be done*/
break;
@@ -1521,8 +1515,9 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
continue;
if ((pending_node->type == GD_NODE_NFS) ||
+ (pending_node->type == GD_NODE_QUOTAD) ||
((pending_node->type == GD_NODE_SHD) &&
- (req_ctx->op == GD_OP_STATUS_VOLUME)))
+ (req_ctx->op == GD_OP_STATUS_VOLUME)))
ret = glusterd_node_op_build_payload
(req_ctx->op,
(gd1_mgmt_brick_op_req **)&req,
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 5902589f4b6..5b51aabaddb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -698,6 +698,21 @@ glusterd_store_node_state_path_set (glusterd_volinfo_t *volinfo,
GLUSTERD_NODE_STATE_FILE);
}
+static void
+glusterd_store_quota_conf_path_set (glusterd_volinfo_t *volinfo,
+ char *quota_conf_path, size_t len)
+{
+ char voldirpath[PATH_MAX] = {0,};
+ GF_ASSERT (volinfo);
+ GF_ASSERT (quota_conf_path);
+ GF_ASSERT (len <= PATH_MAX);
+
+ glusterd_store_voldirpath_set (volinfo, voldirpath,
+ sizeof (voldirpath));
+ snprintf (quota_conf_path, len, "%s/%s", voldirpath,
+ GLUSTERD_VOLUME_QUOTA_CONFIG);
+}
+
int32_t
glusterd_store_create_rbstate_shandle_on_absence (glusterd_volinfo_t *volinfo)
{
@@ -743,6 +758,22 @@ glusterd_store_create_nodestate_sh_on_absence (glusterd_volinfo_t *volinfo)
}
int32_t
+glusterd_store_create_quota_conf_sh_on_absence (glusterd_volinfo_t *volinfo)
+{
+ char quota_conf_path[PATH_MAX] = {0};
+ int32_t ret = 0;
+
+ GF_ASSERT (volinfo);
+
+ glusterd_store_quota_conf_path_set (volinfo, quota_conf_path,
+ sizeof (quota_conf_path));
+ ret =
+ gf_store_handle_create_on_absence (&volinfo->quota_conf_shandle,
+ quota_conf_path);
+
+ return ret;
+}
+int32_t
glusterd_store_brickinfos (glusterd_volinfo_t *volinfo, int vol_fd)
{
int32_t ret = 0;
@@ -1083,7 +1114,7 @@ glusterd_store_volinfo (glusterd_volinfo_t *volinfo, glusterd_volinfo_ver_ac_t a
goto out;
//checksum should be computed at the end
- ret = glusterd_volume_compute_cksum (volinfo);
+ ret = glusterd_compute_cksum (volinfo, _gf_false);
if (ret)
goto out;
@@ -1897,6 +1928,14 @@ glusterd_store_retrieve_volume (char *volname)
break;
case 1:
+ /*The following strcmp check is to ensure that
+ * glusterd does not restore the quota limits
+                         * into volinfo->dict after an upgrade from 3.3
+                         * to 3.4, as the same limits will now be stored
+ * in xattrs on the respective directories.
+ */
+ if (!strcmp (key, "features.limit-usage"))
+ break;
ret = dict_set_str(volinfo->dict, key,
gf_strdup (value));
if (ret) {
@@ -1971,7 +2010,23 @@ glusterd_store_retrieve_volume (char *volname)
if (ret)
goto out;
- ret = glusterd_volume_compute_cksum (volinfo);
+ ret = glusterd_compute_cksum (volinfo, _gf_false);
+ if (ret)
+ goto out;
+
+ ret = glusterd_store_retrieve_quota_version (volinfo);
+ if (ret)
+ goto out;
+
+ ret = glusterd_store_create_quota_conf_sh_on_absence (volinfo);
+ if (ret)
+ goto out;
+
+ ret = glusterd_compute_cksum (volinfo, _gf_true);
+ if (ret)
+ goto out;
+
+ ret = glusterd_store_save_quota_version_and_cksum (volinfo);
if (ret)
goto out;
@@ -2565,3 +2620,106 @@ out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
+
+int
+glusterd_store_retrieve_quota_version (glusterd_volinfo_t *volinfo)
+{
+ int ret = -1;
+ uint32_t version = 0;
+ char cksum_path[PATH_MAX] = {0,};
+ char path[PATH_MAX] = {0,};
+ char *version_str = NULL;
+ char *tmp = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gf_store_handle_t *handle = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ GLUSTERD_GET_VOLUME_DIR (path, volinfo, conf);
+ snprintf (cksum_path, sizeof (cksum_path), "%s/%s", path,
+ GLUSTERD_VOL_QUOTA_CKSUM_FILE);
+
+ ret = gf_store_handle_new (cksum_path, &handle);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get store handle "
+ "for %s", cksum_path);
+ goto out;
+ }
+
+ ret = gf_store_retrieve_value (handle, "version", &version_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG, "Version absent");
+ ret = 0;
+ goto out;
+ }
+
+ version = strtoul (version_str, &tmp, 10);
+ if (version < 0) {
+ gf_log (this->name, GF_LOG_DEBUG, "Invalid version number");
+ goto out;
+ }
+ volinfo->quota_conf_version = version;
+ ret = 0;
+
+out:
+ if (version_str)
+ GF_FREE (version_str);
+ gf_store_handle_destroy (handle);
+ return ret;
+}
+
+int
+glusterd_store_save_quota_version_and_cksum (glusterd_volinfo_t *volinfo)
+{
+ int ret = -1;
+ char cksum_path[PATH_MAX] = {0,};
+ char path[PATH_MAX] = {0,};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char buf[256] = {0,};
+ int fd = -1;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ GLUSTERD_GET_VOLUME_DIR (path, volinfo, conf);
+ snprintf (cksum_path, sizeof (cksum_path), "%s/%s", path,
+ GLUSTERD_VOL_QUOTA_CKSUM_FILE);
+
+ fd = open (cksum_path, O_RDWR | O_APPEND | O_CREAT| O_TRUNC, 0600);
+
+ if (-1 == fd) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to open %s,"
+ "Reason: %s", cksum_path, strerror (errno));
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (buf, sizeof (buf)-1, "%u", volinfo->quota_conf_cksum);
+ ret = gf_store_save_value (fd, "cksum", buf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to store cksum");
+ goto out;
+ }
+
+ memset (buf, 0, sizeof (buf));
+ snprintf (buf, sizeof (buf)-1, "%u", volinfo->quota_conf_version);
+ ret = gf_store_save_value (fd, "version", buf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to store version");
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (fd != -1)
+ close (fd);
+ return ret;
+}
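/*
 * A minimal standalone sketch of what the two gf_store_save_value() calls
 * above leave behind: a small quota.cksum file holding the checksum and
 * version of quota.conf. The one-key-per-line "key=value" layout is an
 * assumption about the store format; the key names and unsigned values come
 * from the hunk itself.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int
write_quota_cksum (const char *path, uint32_t cksum, uint32_t version)
{
        FILE *fp = fopen (path, "w");

        if (!fp)
                return -1;

        /* mirrors the "cksum" and "version" keys stored by the patch */
        fprintf (fp, "cksum=%" PRIu32 "\n", cksum);
        fprintf (fp, "version=%" PRIu32 "\n", version);

        return fclose (fp);
}

int
main (void)
{
        /* values are illustrative only */
        return write_quota_cksum ("quota.cksum", 3735928559u, 1) ? 1 : 0;
}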
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h
index ce1f766b1de..fadea8b2f14 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.h
+++ b/xlators/mgmt/glusterd/src/glusterd-store.h
@@ -128,4 +128,14 @@ glusterd_store_retrieve_options (xlator_t *this);
int32_t
glusterd_store_options (xlator_t *this, dict_t *opts);
+
+int32_t
+glusterd_store_create_quota_conf_sh_on_absence (glusterd_volinfo_t *volinfo);
+
+int
+glusterd_store_retrieve_quota_version (glusterd_volinfo_t *volinfo);
+
+int
+glusterd_store_save_quota_version_and_cksum (glusterd_volinfo_t *volinfo);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index b3bab6fdcdd..33a865209fd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -261,14 +261,18 @@ glusterd_syncop_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp)
break;
- case GD_OP_QUOTA:
case GD_OP_CLEARLOCKS_VOLUME:
ret = glusterd_use_rsp_dict (aggr, rsp);
if (ret)
goto out;
-
break;
+ case GD_OP_QUOTA:
+ ret = glusterd_volume_quota_copy_to_op_ctx_dict (aggr, rsp);
+ if (ret)
+ goto out;
+ break;
+
case GD_OP_SYS_EXEC:
ret = glusterd_sys_exec_output_rsp_dict (aggr, rsp);
if (ret)
@@ -474,7 +478,7 @@ _gd_syncop_stage_op_cbk (struct rpc_req *req, struct iovec *iov,
}
uuid_copy (args->uuid, rsp.uuid);
- if (rsp.op == GD_OP_REPLACE_BRICK) {
+ if (rsp.op == GD_OP_REPLACE_BRICK || rsp.op == GD_OP_QUOTA) {
pthread_mutex_lock (&args->lock_dict);
{
ret = glusterd_syncop_aggr_rsp_dict (rsp.op, args->dict,
@@ -624,8 +628,8 @@ gd_syncop_mgmt_brick_op (struct rpc_clnt *rpc, glusterd_pending_node_t *pnode,
args.op_errno = ENOTCONN;
if ((pnode->type == GD_NODE_NFS) ||
- ((pnode->type == GD_NODE_SHD) &&
- (op == GD_OP_STATUS_VOLUME))) {
+ (pnode->type == GD_NODE_QUOTAD) ||
+ ((pnode->type == GD_NODE_SHD) && (op == GD_OP_STATUS_VOLUME))) {
ret = glusterd_node_op_build_payload
(op, &req, dict_out);
@@ -683,6 +687,7 @@ _gd_syncop_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
glusterd_peerinfo_t *peerinfo = NULL;
int op_ret = -1;
int op_errno = -1;
+ int type = GF_QUOTA_OPTION_TYPE_NONE;
this = THIS;
frame = myframe;
@@ -725,16 +730,27 @@ _gd_syncop_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
}
uuid_copy (args->uuid, rsp.uuid);
- pthread_mutex_lock (&args->lock_dict);
- {
- ret = glusterd_syncop_aggr_rsp_dict (rsp.op, args->dict,
- rsp_dict);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR, "%s",
- "Failed to aggregate response from "
- " node/brick");
+ if (rsp.op == GD_OP_QUOTA) {
+ ret = dict_get_int32 (args->dict, "type", &type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "opcode");
+ goto out;
+ }
+ }
+
+ if ((rsp.op != GD_OP_QUOTA) || (type == GF_QUOTA_OPTION_TYPE_LIST)) {
+ pthread_mutex_lock (&args->lock_dict);
+ {
+ ret = glusterd_syncop_aggr_rsp_dict (rsp.op, args->dict,
+ rsp_dict);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ }
+ pthread_mutex_unlock (&args->lock_dict);
}
- pthread_mutex_unlock (&args->lock_dict);
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
@@ -878,7 +894,7 @@ gd_stage_op_phase (struct list_head *peers, glusterd_op_t op, dict_t *op_ctx,
goto stage_done;
}
- if ((op == GD_OP_REPLACE_BRICK)) {
+ if ((op == GD_OP_REPLACE_BRICK || op == GD_OP_QUOTA)) {
ret = glusterd_syncop_aggr_rsp_dict (op, op_ctx, rsp_dict);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "%s",
@@ -913,6 +929,10 @@ stage_done:
op, req_dict, op_ctx);
peer_cnt++;
}
+
+ gf_log (this->name, GF_LOG_DEBUG, "Sent stage op req for 'Volume %s' "
+ "to %d peers", gd_op_list[op], peer_cnt);
+
gd_synctask_barrier_wait((&args), peer_cnt);
if (args.errstr)
@@ -922,9 +942,14 @@ stage_done:
ret = args.op_ret;
- gf_log (this->name, GF_LOG_DEBUG, "Sent stage op req for 'Volume %s' "
- "to %d peers", gd_op_list[op], peer_cnt);
out:
+ if ((ret == 0) && (op == GD_OP_QUOTA)) {
+ ret = glusterd_validate_and_set_gfid (op_ctx, req_dict,
+ op_errstr);
+ if (ret)
+ goto out;
+ }
+
if (rsp_dict)
dict_unref (rsp_dict);
return ret;
@@ -943,6 +968,7 @@ gd_commit_op_phase (struct list_head *peers, glusterd_op_t op, dict_t *op_ctx,
uuid_t tmp_uuid = {0};
char *errstr = NULL;
struct syncargs args = {0};
+ int type = GF_QUOTA_OPTION_TYPE_NONE;
this = THIS;
rsp_dict = dict_new ();
@@ -956,15 +982,28 @@ gd_commit_op_phase (struct list_head *peers, glusterd_op_t op, dict_t *op_ctx,
hostname = "localhost";
goto commit_done;
}
- if (op != GD_OP_SYNC_VOLUME) {
- ret = glusterd_syncop_aggr_rsp_dict (op, op_ctx, rsp_dict);
+
+ if (op == GD_OP_QUOTA) {
+ ret = dict_get_int32 (op_ctx, "type", &type);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "%s",
- "Failed to aggregate response "
- "from node/brick");
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "opcode");
goto out;
}
}
+
+ if (((op == GD_OP_QUOTA) && (type == GF_QUOTA_OPTION_TYPE_LIST)) ||
+ ((op != GD_OP_SYNC_VOLUME) && (op != GD_OP_QUOTA))) {
+
+ ret = glusterd_syncop_aggr_rsp_dict (op, op_ctx,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s", "Failed to aggregate "
+ "response from node/brick");
+ goto out;
+ }
+ }
+
dict_unref (rsp_dict);
rsp_dict = NULL;
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index e093a566b09..c45f2445c34 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -565,6 +565,8 @@ glusterd_volinfo_delete (glusterd_volinfo_t *volinfo)
dict_unref (volinfo->gsync_slaves);
GF_FREE (volinfo->logdir);
+ gf_store_handle_destroy (volinfo->quota_conf_shandle);
+
glusterd_auth_cleanup (volinfo);
GF_FREE (volinfo);
@@ -1105,7 +1107,7 @@ glusterd_service_stop (const char *service, char *pidfile, int sig,
this = THIS;
GF_ASSERT (this);
- if (!glusterd_is_service_running (pidfile, &pid)) {
+ if (!gf_is_service_running (pidfile, &pid)) {
ret = 0;
gf_log (this->name, GF_LOG_INFO, "%s already stopped", service);
goto out;
@@ -1118,7 +1120,7 @@ glusterd_service_stop (const char *service, char *pidfile, int sig,
goto out;
sleep (1);
- if (glusterd_is_service_running (pidfile, NULL)) {
+ if (gf_is_service_running (pidfile, NULL)) {
ret = kill (pid, SIGKILL);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Unable to "
@@ -1284,7 +1286,7 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo,
if (ret)
goto out;
GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, brickinfo, priv);
- if (glusterd_is_service_running (pidfile, NULL))
+ if (gf_is_service_running (pidfile, NULL))
goto connect;
_reap_brick_process (pidfile, brickinfo->path);
@@ -1617,89 +1619,85 @@ glusterd_sort_and_redirect (const char *src_filepath, int dest_fd)
}
int
-glusterd_volume_compute_cksum (glusterd_volinfo_t *volinfo)
-{
- int32_t ret = -1;
- glusterd_conf_t *priv = NULL;
- char path[PATH_MAX] = {0,};
- char cksum_path[PATH_MAX] = {0,};
- char filepath[PATH_MAX] = {0,};
- int fd = -1;
- uint32_t cksum = 0;
- char buf[4096] = {0,};
+glusterd_volume_compute_cksum (glusterd_volinfo_t *volinfo, char *cksum_path,
+ char *filepath, gf_boolean_t is_quota_conf,
+ uint32_t *cs)
+{
+ int32_t ret = -1;
+ uint32_t cksum = 0;
+ int fd = -1;
+ int sort_fd = 0;
char sort_filepath[PATH_MAX] = {0};
- gf_boolean_t unlink_sortfile = _gf_false;
- int sort_fd = 0;
- xlator_t *this = NULL;
+ char *cksum_path_final = NULL;
+ char buf[4096] = {0,};
+ gf_boolean_t unlink_sortfile = _gf_false;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
GF_ASSERT (volinfo);
this = THIS;
priv = THIS->private;
GF_ASSERT (priv);
- GLUSTERD_GET_VOLUME_DIR (path, volinfo, priv);
-
- snprintf (cksum_path, sizeof (cksum_path), "%s/%s",
- path, GLUSTERD_CKSUM_FILE);
-
fd = open (cksum_path, O_RDWR | O_APPEND | O_CREAT| O_TRUNC, 0600);
if (-1 == fd) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to open %s, errno: %d",
- cksum_path, errno);
+ gf_log (this->name, GF_LOG_ERROR, "Unable to open %s,"
+ " errno: %d", cksum_path, errno);
ret = -1;
goto out;
}
- snprintf (filepath, sizeof (filepath), "%s/%s", path,
- GLUSTERD_VOLUME_INFO_FILE);
- snprintf (sort_filepath, sizeof (sort_filepath), "/tmp/%s.XXXXXX",
- volinfo->volname);
+ if (!is_quota_conf) {
+ snprintf (sort_filepath, sizeof (sort_filepath),
+ "/tmp/%s.XXXXXX", volinfo->volname);
- sort_fd = mkstemp (sort_filepath);
- if (sort_fd < 0) {
- gf_log (this->name, GF_LOG_ERROR, "Could not generate temp "
- "file, reason: %s for volume: %s", strerror (errno),
- volinfo->volname);
- goto out;
- } else {
- unlink_sortfile = _gf_true;
- }
+ sort_fd = mkstemp (sort_filepath);
+ if (sort_fd < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not generate "
+ "temp file, reason: %s for volume: %s",
+ strerror (errno), volinfo->volname);
+ goto out;
+ } else {
+ unlink_sortfile = _gf_true;
+ }
- /* sort the info file, result in sort_filepath */
+ /* sort the info file, result in sort_filepath */
- ret = glusterd_sort_and_redirect (filepath, sort_fd);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "sorting info file failed");
- goto out;
- }
+ ret = glusterd_sort_and_redirect (filepath, sort_fd);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "sorting info file "
+ "failed");
+ goto out;
+ }
- ret = close (sort_fd);
- if (ret)
- goto out;
+ ret = close (sort_fd);
+ if (ret)
+ goto out;
+ }
- ret = get_checksum_for_path (sort_filepath, &cksum);
+ cksum_path_final = is_quota_conf ? filepath : sort_filepath;
+ ret = get_checksum_for_path (cksum_path_final, &cksum);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to get checksum"
- " for path: %s", sort_filepath);
+ gf_log (this->name, GF_LOG_ERROR, "unable to get "
+ "checksum for path: %s", cksum_path_final);
goto out;
}
-
- snprintf (buf, sizeof (buf), "%s=%u\n", "info", cksum);
- ret = write (fd, buf, strlen (buf));
-
- if (ret <= 0) {
- ret = -1;
- goto out;
+ if (!is_quota_conf) {
+ snprintf (buf, sizeof (buf), "%s=%u\n", "info", cksum);
+ ret = write (fd, buf, strlen (buf));
+ if (ret <= 0) {
+ ret = -1;
+ goto out;
+ }
}
ret = get_checksum_for_file (fd, &cksum);
-
if (ret)
goto out;
- volinfo->cksum = cksum;
+ *cs = cksum;
out:
if (fd > 0)
@@ -1711,6 +1709,54 @@ out:
return ret;
}
+int glusterd_compute_cksum (glusterd_volinfo_t *volinfo,
+ gf_boolean_t is_quota_conf)
+{
+ int ret = -1;
+ uint32_t cs = 0;
+ char cksum_path[PATH_MAX] = {0,};
+ char path[PATH_MAX] = {0,};
+ char filepath[PATH_MAX] = {0,};
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ GLUSTERD_GET_VOLUME_DIR (path, volinfo, conf);
+
+ if (is_quota_conf) {
+ snprintf (cksum_path, sizeof (cksum_path), "%s/%s", path,
+ GLUSTERD_VOL_QUOTA_CKSUM_FILE);
+ snprintf (filepath, sizeof (filepath), "%s/%s", path,
+ GLUSTERD_VOLUME_QUOTA_CONFIG);
+ } else {
+ snprintf (cksum_path, sizeof (cksum_path), "%s/%s", path,
+ GLUSTERD_CKSUM_FILE);
+ snprintf (filepath, sizeof (filepath), "%s/%s", path,
+ GLUSTERD_VOLUME_INFO_FILE);
+ }
+
+ ret = glusterd_volume_compute_cksum (volinfo, cksum_path, filepath,
+ is_quota_conf, &cs);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to compute checksum "
+ "for volume %s", volinfo->volname);
+ goto out;
+ }
+
+ if (is_quota_conf)
+ volinfo->quota_conf_cksum = cs;
+ else
+ volinfo->cksum = cs;
+
+ ret = 0;
+out:
+ return ret;
+}
+
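The two hunks above split checksum handling into a path-agnostic worker and a thin wrapper that picks either the volume info file or quota.conf and stores the result in the matching volinfo field. Below is a minimal standalone sketch of the same shape — checksum a file, then record it as key=value in a companion file. The rotate-xor checksum, the file arguments and the "info=" key are illustrative stand-ins, not the GlusterFS helpers.

/* Illustration only: checksum a config file and record it as
 * "info=<cksum>" in a companion file.  file_cksum() is a toy
 * stand-in, not get_checksum_for_path(). */
#include <stdio.h>
#include <stdint.h>

static int
file_cksum (const char *path, uint32_t *out)
{
        FILE     *fp  = fopen (path, "rb");
        uint32_t  sum = 0;
        int       c   = 0;

        if (!fp)
                return -1;
        while ((c = fgetc (fp)) != EOF)
                sum = ((sum << 1) | (sum >> 31)) ^ (uint32_t) c;
        fclose (fp);
        *out = sum;
        return 0;
}

int
main (int argc, char **argv)
{
        uint32_t  cs  = 0;
        FILE     *out = NULL;

        if (argc != 3 || file_cksum (argv[1], &cs))
                return 1;

        out = fopen (argv[2], "w");
        if (!out)
                return 1;
        fprintf (out, "info=%u\n", cs);   /* same key=value layout as cksum */
        fclose (out);
        printf ("checksum of %s = %u\n", argv[1], cs);
        return 0;
}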
int
_add_dict_to_prdict (dict_t *this, char *key, data_t *value, void *data)
{
@@ -2019,6 +2065,93 @@ out:
return ret;
}
+int
+glusterd_vol_add_quota_conf_to_dict (glusterd_volinfo_t *volinfo, dict_t* load,
+ int vol_idx)
+{
+ int fd = -1;
+ char *gfid_str = NULL;
+ unsigned char buf[16] = {0};
+ char key[PATH_MAX] = {0};
+ int gfid_idx = 0;
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = glusterd_store_create_quota_conf_sh_on_absence (volinfo);
+ if (ret)
+ goto out;
+
+ fd = open (volinfo->quota_conf_shandle->path, O_RDONLY);
+ if (fd == -1) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_store_quota_conf_skip_header (this, fd);
+ if (ret)
+ goto out;
+
+ for (gfid_idx=0; ; gfid_idx++) {
+
+ ret = read (fd, (void*)&buf, 16);
+ if (ret <= 0) {
+ //Finished reading all entries in the conf file
+ break;
+ }
+ if (ret != 16) {
+ //This should never happen. We must have a multiple of
+ //entry_sz bytes in our configuration file.
+ gf_log (this->name, GF_LOG_CRITICAL, "Quota "
+ "configuration store may be corrupt.");
+ goto out;
+ }
+
+ gfid_str = gf_strdup (uuid_utoa (buf));
+ if (!gfid_str) {
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof(key)-1, "volume%d.gfid%d", vol_idx,
+ gfid_idx);
+ key[sizeof(key)-1] = '\0';
+ ret = dict_set_dynstr (load, key, gfid_str);
+ if (ret) {
+ goto out;
+ }
+
+ gfid_str = NULL;
+ }
+
+ snprintf (key, sizeof(key)-1, "volume%d.gfid-count", vol_idx);
+ key[sizeof(key)-1] = '\0';
+ ret = dict_set_int32 (load, key, gfid_idx);
+ if (ret)
+ goto out;
+
+ snprintf (key, sizeof(key)-1, "volume%d.quota-cksum", vol_idx);
+ key[sizeof(key)-1] = '\0';
+ ret = dict_set_uint32 (load, key, volinfo->quota_conf_cksum);
+ if (ret)
+ goto out;
+
+ snprintf (key, sizeof(key)-1, "volume%d.quota-version", vol_idx);
+ key[sizeof(key)-1] = '\0';
+ ret = dict_set_uint32 (load, key, volinfo->quota_conf_version);
+ if (ret)
+ goto out;
+
+ ret = 0;
+out:
+ if (fd != -1)
+ close (fd);
+ GF_FREE (gfid_str);
+ return ret;
+}
+
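glusterd_vol_add_quota_conf_to_dict() above walks quota.conf as a sequence of fixed 16-byte GFID records and exports them under volume<i>.gfid<j> keys. The standalone reader below mirrors that record layout; it assumes libuuid is available (link with -luuid), the file name comes from the command line, and — as in the loop above — a read that is not a multiple of 16 bytes is treated as corruption.

/* Hypothetical reader for a file of fixed 16-byte GFID records. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <uuid/uuid.h>

int
main (int argc, char **argv)
{
        unsigned char rec[16] = {0};
        char          str[37] = {0};
        ssize_t       n       = 0;
        int           idx     = 0;
        int           fd      = -1;

        if (argc != 2)
                return 1;
        fd = open (argv[1], O_RDONLY);
        if (fd < 0)
                return 1;

        while ((n = read (fd, rec, sizeof (rec))) == (ssize_t) sizeof (rec)) {
                uuid_unparse (rec, str);
                printf ("gfid%d=%s\n", idx++, str);
        }
        if (n > 0)      /* partial record: treat the store as corrupt */
                fprintf (stderr, "trailing %zd bytes, file corrupt?\n", n);

        printf ("gfid-count=%d\n", idx);
        close (fd);
        return (n > 0) ? 1 : 0;
}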
int32_t
glusterd_build_volume_dict (dict_t **vols)
{
@@ -2041,6 +2174,11 @@ glusterd_build_volume_dict (dict_t **vols)
ret = glusterd_add_volume_to_dict (volinfo, dict, count);
if (ret)
goto out;
+ if (!glusterd_is_volume_quota_enabled (volinfo))
+ continue;
+ ret = glusterd_vol_add_quota_conf_to_dict (volinfo, dict, count);
+ if (ret)
+ goto out;
}
@@ -2078,11 +2216,17 @@ glusterd_compare_friend_volume (dict_t *vols, int32_t count, int32_t *status,
glusterd_volinfo_t *volinfo = NULL;
char *volname = NULL;
uint32_t cksum = 0;
+ uint32_t quota_cksum = 0;
+ uint32_t quota_version = 0;
int32_t version = 0;
+ xlator_t *this = NULL;
GF_ASSERT (vols);
GF_ASSERT (status);
+ this = THIS;
+ GF_ASSERT (this);
+
snprintf (key, sizeof (key), "volume%d.name", count);
ret = dict_get_str (vols, key, &volname);
if (ret)
@@ -2105,7 +2249,7 @@ glusterd_compare_friend_volume (dict_t *vols, int32_t count, int32_t *status,
if (version > volinfo->version) {
//Mismatch detected
ret = 0;
- gf_log ("", GF_LOG_ERROR, "Version of volume %s differ."
+ gf_log (this->name, GF_LOG_ERROR, "Version of volume %s differ."
"local version = %d, remote version = %d on peer %s",
volinfo->volname, volinfo->version, version, hostname);
*status = GLUSTERD_VOL_COMP_UPDATE_REQ;
@@ -2125,17 +2269,66 @@ glusterd_compare_friend_volume (dict_t *vols, int32_t count, int32_t *status,
if (cksum != volinfo->cksum) {
ret = 0;
- gf_log ("", GF_LOG_ERROR, "Cksums of volume %s differ."
+ gf_log (this->name, GF_LOG_ERROR, "Cksums of volume %s differ."
" local cksum = %u, remote cksum = %u on peer %s",
volinfo->volname, volinfo->cksum, cksum, hostname);
*status = GLUSTERD_VOL_COMP_RJT;
goto out;
}
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "volume%d.quota-version", count);
+ ret = dict_get_uint32 (vols, key, &quota_version);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG, "quota-version key absent for"
+ " volume %s in peer %s's response", volinfo->volname,
+ hostname);
+ ret = 0;
+ } else {
+ if (quota_version > volinfo->quota_conf_version) {
+ //Mismatch detected
+ ret = 0;
+ gf_log (this->name, GF_LOG_ERROR, "Quota configuration "
+ "versions of volume %s differ. "
+ "local version = %d, remote version = %d "
+ "on peer %s", volinfo->volname,
+ volinfo->quota_conf_version, quota_version,
+ hostname);
+ *status = GLUSTERD_VOL_COMP_UPDATE_REQ;
+ goto out;
+ } else if (quota_version < volinfo->quota_conf_version) {
+ *status = GLUSTERD_VOL_COMP_SCS;
+ goto out;
+ }
+ }
+
+ //Now that the versions are the same, compare the checksums.
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "volume%d.quota-cksum", count);
+ ret = dict_get_uint32 (vols, key, &quota_cksum);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG, "quota checksum absent for "
+ "volume %s in peer %s's response", volinfo->volname,
+ hostname);
+ ret = 0;
+ } else {
+ if (quota_cksum != volinfo->quota_conf_cksum) {
+ ret = 0;
+ gf_log (this->name, GF_LOG_ERROR, "Cksums of quota "
+ "configurations of volume %s differ. "
+ "local cksum = %u, remote cksum = %u on "
+ "peer %s", volinfo->volname,
+ volinfo->quota_conf_cksum, quota_cksum,
+ hostname);
+ *status = GLUSTERD_VOL_COMP_RJT;
+ goto out;
+ }
+ }
*status = GLUSTERD_VOL_COMP_SCS;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning with ret: %d, status: %d",
+ gf_log (this->name, GF_LOG_DEBUG, "Returning with ret: %d, status: %d",
ret, *status);
return ret;
}
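The comparison added above follows a version-first, checksum-second rule for the quota configuration, mirroring what is already done for the volume info file. A minimal sketch of that decision table follows; the status names are borrowed from the enum, but the function itself is made up for illustration.

/* Version-then-checksum comparison, reduced to a pure function. */
#include <stdio.h>
#include <stdint.h>

enum comp_status { COMP_SCS, COMP_UPDATE_REQ, COMP_RJT };

static enum comp_status
compare_conf (uint32_t local_ver, uint32_t local_cksum,
              uint32_t remote_ver, uint32_t remote_cksum)
{
        if (remote_ver > local_ver)
                return COMP_UPDATE_REQ;   /* local copy is stale */
        if (remote_ver < local_ver)
                return COMP_SCS;          /* peer is behind; nothing to do here */
        if (remote_cksum != local_cksum)
                return COMP_RJT;          /* same version, different contents */
        return COMP_SCS;
}

int
main (void)
{
        printf ("%d\n", compare_conf (2, 0xabcd, 3, 0x1234)); /* 1: update */
        printf ("%d\n", compare_conf (3, 0xabcd, 3, 0xffff)); /* 2: reject */
        return 0;
}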
@@ -2582,6 +2775,113 @@ out:
return ret;
}
+static int
+glusterd_import_quota_conf (dict_t *vols, int vol_idx,
+ glusterd_volinfo_t *new_volinfo)
+{
+ int gfid_idx = 0;
+ int gfid_count = 0;
+ int ret = -1;
+ int fd = -1;
+ char key[PATH_MAX] = {0};
+ char *gfid_str = NULL;
+ uuid_t gfid = {0,};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ if (!glusterd_is_volume_quota_enabled (new_volinfo)) {
+ (void) glusterd_clean_up_quota_store (new_volinfo);
+ return 0;
+ }
+
+ ret = glusterd_store_create_quota_conf_sh_on_absence (new_volinfo);
+ if (ret)
+ goto out;
+
+ fd = gf_store_mkstemp (new_volinfo->quota_conf_shandle);
+ if (fd < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof (key)-1, "volume%d.quota-cksum", vol_idx);
+ key[sizeof(key)-1] = '\0';
+ ret = dict_get_uint32 (vols, key, &new_volinfo->quota_conf_cksum);
+ if (ret)
+ gf_log (this->name, GF_LOG_DEBUG, "Failed to get quota cksum");
+
+ snprintf (key, sizeof (key)-1, "volume%d.quota-version", vol_idx);
+ key[sizeof(key)-1] = '\0';
+ ret = dict_get_uint32 (vols, key, &new_volinfo->quota_conf_version);
+ if (ret)
+ gf_log (this->name, GF_LOG_DEBUG, "Failed to get quota "
+ "version");
+
+ snprintf (key, sizeof (key)-1, "volume%d.gfid-count", vol_idx);
+ key[sizeof(key)-1] = '\0';
+ ret = dict_get_int32 (vols, key, &gfid_count);
+ if (ret)
+ goto out;
+
+ ret = glusterd_store_quota_conf_stamp_header (this, fd);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to add header to tmp "
+ "file");
+ goto out;
+ }
+
+ gfid_idx = 0;
+ for (gfid_idx = 0; gfid_idx < gfid_count; gfid_idx++) {
+
+ snprintf (key, sizeof (key)-1, "volume%d.gfid%d",
+ vol_idx, gfid_idx);
+ key[sizeof(key)-1] = '\0';
+ ret = dict_get_str (vols, key, &gfid_str);
+ if (ret)
+ goto out;
+
+ uuid_parse (gfid_str, gfid);
+ ret = write (fd, (void*)gfid, 16);
+ if (ret != 16) {
+ gf_log (this->name, GF_LOG_CRITICAL, "Unable to write "
+ "gfid %s into quota.conf for %s", gfid_str,
+ new_volinfo->volname);
+ ret = -1;
+ goto out;
+ }
+
+ }
+
+ ret = gf_store_rename_tmppath (new_volinfo->quota_conf_shandle);
+
+ ret = 0;
+
+out:
+ if (fd != -1)
+ close (fd);
+
+ if (!ret) {
+ ret = glusterd_compute_cksum (new_volinfo, _gf_true);
+ if (ret)
+ goto out;
+
+ ret = glusterd_store_save_quota_version_and_cksum (new_volinfo);
+ if (ret)
+ goto out;
+ }
+
+ if (ret && (fd > 0)) {
+ gf_store_unlink_tmppath (new_volinfo->quota_conf_shandle);
+ (void) gf_store_handle_destroy
+ (new_volinfo->quota_conf_shandle);
+ new_volinfo->quota_conf_shandle = NULL;
+ }
+
+ return ret;
+}
+
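glusterd_import_quota_conf() writes the peer's GFIDs into a temporary file via the store handle and only renames it over quota.conf once everything has succeeded, unlinking the temp path on failure. The snippet below is a stripped-down, hypothetical version of that write-then-rename pattern using plain mkstemp()/rename(); the paths and payload are placeholders.

/* Sketch of "write to a temp file, then rename into place". */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main (void)
{
        char        tmppath[]  = "/tmp/quota.conf.XXXXXX";
        const char *final_path = "/tmp/quota.conf";
        const char *payload    = "GlusterFS Quota conf | version: v1.1\n";
        int         fd         = -1;

        fd = mkstemp (tmppath);
        if (fd < 0)
                return 1;

        if (write (fd, payload, strlen (payload)) != (ssize_t) strlen (payload)) {
                close (fd);
                unlink (tmppath);         /* failure path: drop the temp file */
                return 1;
        }
        close (fd);

        if (rename (tmppath, final_path) != 0) {   /* atomic replace */
                unlink (tmppath);
                return 1;
        }
        puts ("quota.conf replaced atomically");
        return 0;
}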
int32_t
glusterd_import_volinfo (dict_t *vols, int count,
glusterd_volinfo_t **volinfo)
@@ -2863,6 +3163,7 @@ glusterd_import_volinfo (dict_t *vols, int count,
ret = glusterd_import_bricks (vols, count, new_volinfo);
if (ret)
goto out;
+
*volinfo = new_volinfo;
out:
if (msg[0])
@@ -3032,6 +3333,10 @@ glusterd_import_friend_volume (dict_t *vols, size_t count)
if (ret)
goto out;
+ ret = glusterd_import_quota_conf (vols, count, new_volinfo);
+ if (ret)
+ goto out;
+
list_add_tail (&new_volinfo->vol_list, &priv->volumes);
out:
gf_log ("", GF_LOG_DEBUG, "Returning with ret: %d", ret);
@@ -3161,6 +3466,7 @@ glusterd_compare_friend_data (dict_t *vols, int32_t *status, char *hostname)
gf_boolean_t update = _gf_false;
gf_boolean_t stale_nfs = _gf_false;
gf_boolean_t stale_shd = _gf_false;
+ gf_boolean_t stale_qd = _gf_false;
GF_ASSERT (vols);
GF_ASSERT (status);
@@ -3190,6 +3496,8 @@ glusterd_compare_friend_data (dict_t *vols, int32_t *status, char *hostname)
stale_nfs = _gf_true;
if (glusterd_is_nodesvc_running ("glustershd"))
stale_shd = _gf_true;
+ if (glusterd_is_nodesvc_running ("quotad"))
+ stale_qd = _gf_true;
ret = glusterd_import_global_opts (vols);
if (ret)
goto out;
@@ -3203,6 +3511,8 @@ glusterd_compare_friend_data (dict_t *vols, int32_t *status, char *hostname)
glusterd_nfs_server_stop ();
if (stale_shd)
glusterd_shd_stop ();
+ if (stale_qd)
+ glusterd_quotad_stop ();
}
}
@@ -3213,40 +3523,6 @@ out:
return ret;
}
-/* Valid only in if service is 'local' to glusterd.
- * pid can be -1, if reading pidfile failed */
-gf_boolean_t
-glusterd_is_service_running (char *pidfile, int *pid)
-{
- FILE *file = NULL;
- gf_boolean_t running = _gf_false;
- int ret = 0;
- int fno = 0;
-
- file = fopen (pidfile, "r+");
- if (!file)
- goto out;
-
- fno = fileno (file);
- ret = lockf (fno, F_TEST, 0);
- if (ret == -1)
- running = _gf_true;
- if (!pid)
- goto out;
-
- ret = fscanf (file, "%d", pid);
- if (ret <= 0) {
- gf_log ("", GF_LOG_ERROR, "Unable to read pidfile: %s, %s",
- pidfile, strerror (errno));
- *pid = -1;
- }
-
-out:
- if (file)
- fclose (file);
- return running;
-}
-
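The block above removes glusterd's private pidfile helper in favour of the generic gf_is_service_running() used throughout the rest of this patch. For reference, a self-contained version of the same lockf(F_TEST)-on-pidfile liveness check looks roughly like this; the pidfile path is only an example.

/* Simplified pidfile liveness check: the daemon holds a lock on its
 * pidfile, so a failed F_TEST means it is still alive. */
#include <stdio.h>
#include <unistd.h>

static int
service_running (const char *pidfile, int *pid)
{
        FILE *fp      = fopen (pidfile, "r+");
        int   running = 0;

        if (!fp)
                return 0;                 /* no pidfile => not running */

        if (lockf (fileno (fp), F_TEST, 0) == -1)
                running = 1;              /* lock held => daemon alive */

        if (pid && fscanf (fp, "%d", pid) != 1)
                *pid = -1;

        fclose (fp);
        return running;
}

int
main (void)
{
        int pid = -1;

        printf ("running=%d pid=%d\n",
                service_running ("/var/run/quotad.pid", &pid), pid);
        return 0;
}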
void
glusterd_get_nodesvc_dir (char *server, char *workdir,
char *path, size_t len)
@@ -3285,7 +3561,10 @@ glusterd_get_nodesvc_volfile (char *server, char *workdir,
GF_ASSERT (len == PATH_MAX);
glusterd_get_nodesvc_dir (server, workdir, dir, sizeof (dir));
- snprintf (volfile, len, "%s/%s-server.vol", dir, server);
+ if (strcmp ("quotad", server) != 0)
+ snprintf (volfile, len, "%s/%s-server.vol", dir, server);
+ else
+ snprintf (volfile, len, "%s/%s.vol", dir, server);
}
void
@@ -3298,11 +3577,14 @@ glusterd_nodesvc_set_online_status (char *server, gf_boolean_t status)
GF_ASSERT (priv);
GF_ASSERT (priv->shd);
GF_ASSERT (priv->nfs);
+ GF_ASSERT (priv->quotad);
if (!strcmp("glustershd", server))
priv->shd->online = status;
else if (!strcmp ("nfs", server))
priv->nfs->online = status;
+ else if (!strcmp ("quotad", server))
+ priv->quotad->online = status;
}
gf_boolean_t
@@ -3316,11 +3598,14 @@ glusterd_is_nodesvc_online (char *server)
GF_ASSERT (conf);
GF_ASSERT (conf->shd);
GF_ASSERT (conf->nfs);
+ GF_ASSERT (conf->quotad);
if (!strcmp (server, "glustershd"))
online = conf->shd->online;
else if (!strcmp (server, "nfs"))
online = conf->nfs->online;
+ else if (!strcmp (server, "quotad"))
+ online = conf->quotad->online;
return online;
}
@@ -3346,6 +3631,7 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
nodesrv_t *shd = NULL;
glusterd_volinfo_t *volinfo = NULL;
nodesrv_t *nfs = NULL;
+ nodesrv_t *quotad = NULL;
GF_VALIDATE_OR_GOTO (THIS->name, pending_node, out);
GF_VALIDATE_OR_GOTO (THIS->name, pending_node->node, out);
@@ -3367,6 +3653,10 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
nfs = pending_node->node;
rpc = nfs->rpc;
+ } else if (pending_node->type == GD_NODE_QUOTAD) {
+ quotad = pending_node->node;
+ rpc = quotad->rpc;
+
} else {
GF_ASSERT (0);
}
@@ -3386,11 +3676,14 @@ glusterd_nodesvc_get_rpc (char *server)
GF_ASSERT (priv);
GF_ASSERT (priv->shd);
GF_ASSERT (priv->nfs);
+ GF_ASSERT (priv->quotad);
if (!strcmp (server, "glustershd"))
rpc = priv->shd->rpc;
else if (!strcmp (server, "nfs"))
rpc = priv->nfs->rpc;
+ else if (!strcmp (server, "quotad"))
+ rpc = priv->quotad->rpc;
return rpc;
}
@@ -3408,11 +3701,14 @@ glusterd_nodesvc_set_rpc (char *server, struct rpc_clnt *rpc)
GF_ASSERT (priv);
GF_ASSERT (priv->shd);
GF_ASSERT (priv->nfs);
+ GF_ASSERT (priv->quotad);
if (!strcmp ("glustershd", server))
priv->shd->rpc = rpc;
else if (!strcmp ("nfs", server))
priv->nfs->rpc = rpc;
+ else if (!strcmp ("quotad", server))
+ priv->quotad->rpc = rpc;
return ret;
}
@@ -3539,6 +3835,14 @@ glusterd_nodesvc_start (char *server)
runner_add_args (&runner, "--xlator-option",
glusterd_uuid_option, NULL);
}
+ if (!strcmp (server, "quotad")) {
+ runner_add_args (&runner, "--xlator-option",
+ "*replicate*.data-self-heal=off",
+ "--xlator-option",
+ "*replicate*.metadata-self-heal=off",
+ "--xlator-option",
+ "*replicate*.entry-self-heal=off", NULL);
+ }
runner_log (&runner, "", GF_LOG_DEBUG,
"Starting the nfs/glustershd services");
@@ -3562,6 +3866,12 @@ glusterd_shd_start ()
return glusterd_nodesvc_start ("glustershd");
}
+int
+glusterd_quotad_start ()
+{
+ return glusterd_nodesvc_start ("quotad");
+}
+
gf_boolean_t
glusterd_is_nodesvc_running (char *server)
{
@@ -3570,7 +3880,7 @@ glusterd_is_nodesvc_running (char *server)
glusterd_get_nodesvc_pidfile (server, priv->workdir,
pidfile, sizeof (pidfile));
- return glusterd_is_service_running (pidfile, NULL);
+ return gf_is_service_running (pidfile, NULL);
}
int32_t
@@ -3680,6 +3990,12 @@ glusterd_shd_stop ()
}
int
+glusterd_quotad_stop ()
+{
+ return glusterd_nodesvc_stop ("quotad", SIGTERM);
+}
+
+int
glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
dict_t *vol_opts)
{
@@ -3695,7 +4011,7 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
sizeof (pidfile));
//Consider service to be running only when glusterd sees it Online
if (glusterd_is_nodesvc_online (server))
- running = glusterd_is_service_running (pidfile, &pid);
+ running = gf_is_service_running (pidfile, &pid);
/* For nfs-servers/self-heal-daemon setting
* brick<n>.hostname = "NFS Server" / "Self-heal Daemon"
@@ -3711,6 +4027,8 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
ret = dict_set_str (dict, key, "NFS Server");
else if (!strcmp (server, "glustershd"))
ret = dict_set_str (dict, key, "Self-heal Daemon");
+ else if (!strcmp (server, "quotad"))
+ ret = dict_set_str (dict, key, "Quota Daemon");
if (ret)
goto out;
@@ -3829,6 +4147,12 @@ glusterd_reconfigure_shd ()
}
int
+glusterd_reconfigure_quotad ()
+{
+ return glusterd_reconfigure_nodesvc (glusterd_create_quotad_volfile);
+}
+
+int
glusterd_reconfigure_nfs ()
{
int ret = -1;
@@ -3905,21 +4229,52 @@ glusterd_check_generate_start_shd ()
}
int
-glusterd_nodesvcs_batch_op (glusterd_volinfo_t *volinfo,
- int (*nfs_op) (), int (*shd_op) ())
+glusterd_check_generate_start_quotad ()
{
+ int ret = 0;
+
+ ret = glusterd_check_generate_start_service (glusterd_create_quotad_volfile,
+ glusterd_quotad_stop,
+ glusterd_quotad_start);
+ if (ret == -EINVAL)
+ ret = 0;
+ return ret;
+}
+
+int
+glusterd_nodesvcs_batch_op (glusterd_volinfo_t *volinfo, int (*nfs_op) (),
+ int (*shd_op) (), int (*qd_op) ())
+ {
int ret = 0;
+ xlator_t *this = THIS;
+ glusterd_conf_t *conf = NULL;
+
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
ret = nfs_op ();
if (ret)
goto out;
- if (volinfo && !glusterd_is_volume_replicate (volinfo))
+ if (volinfo && !glusterd_is_volume_replicate (volinfo)) {
+ ; //do nothing
+ } else {
+ ret = shd_op ();
+ if (ret)
+ goto out;
+ }
+
+ if (conf->op_version == GD_OP_VERSION_MIN)
goto out;
- ret = shd_op ();
+ if (volinfo && !glusterd_is_volume_quota_enabled (volinfo))
+ goto out;
+
+ ret = qd_op ();
if (ret)
goto out;
+
out:
return ret;
}
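glusterd_nodesvcs_batch_op() now takes a third function pointer so quotad is started, stopped or reconfigured alongside nfs and glustershd, but only when the cluster op-version is above the minimum and, for a specific volume, only when quota is enabled. A toy version of that function-pointer batching, with the gating reduced to two booleans and all names invented:

/* Function-pointer "batch op" over node services, heavily simplified. */
#include <stdio.h>

static int nfs_op (void)    { puts ("nfs op");    return 0; }
static int shd_op (void)    { puts ("shd op");    return 0; }
static int quotad_op (void) { puts ("quotad op"); return 0; }

static int
batch_op (int want_shd, int want_quotad,
          int (*nfs) (void), int (*shd) (void), int (*qd) (void))
{
        int ret = nfs ();

        if (ret)
                return ret;
        if (want_shd && (ret = shd ()))
                return ret;
        if (want_quotad && (ret = qd ()))
                return ret;
        return 0;
}

int
main (void)
{
        /* e.g. a replicate volume with quota enabled */
        return batch_op (1, 1, nfs_op, shd_op, quotad_op);
}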
@@ -3929,7 +4284,8 @@ glusterd_nodesvcs_start (glusterd_volinfo_t *volinfo)
{
return glusterd_nodesvcs_batch_op (volinfo,
glusterd_nfs_server_start,
- glusterd_shd_start);
+ glusterd_shd_start,
+ glusterd_quotad_start);
}
int
@@ -3937,7 +4293,8 @@ glusterd_nodesvcs_stop (glusterd_volinfo_t *volinfo)
{
return glusterd_nodesvcs_batch_op (volinfo,
glusterd_nfs_server_stop,
- glusterd_shd_stop);
+ glusterd_shd_stop,
+ glusterd_quotad_stop);
}
gf_boolean_t
@@ -3983,21 +4340,53 @@ glusterd_all_replicate_volumes_stopped ()
return _gf_true;
}
+gf_boolean_t
+glusterd_all_volumes_with_quota_stopped ()
+{
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ glusterd_volinfo_t *voliter = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ list_for_each_entry (voliter, &priv->volumes, vol_list) {
+ if (!glusterd_is_volume_quota_enabled (voliter))
+ continue;
+ if (voliter->status == GLUSTERD_STATUS_STARTED)
+ return _gf_false;
+ }
+
+ return _gf_true;
+}
+
+
int
glusterd_nodesvcs_handle_graph_change (glusterd_volinfo_t *volinfo)
{
int (*shd_op) () = NULL;
int (*nfs_op) () = NULL;
+ int (*qd_op) () = NULL;
shd_op = glusterd_check_generate_start_shd;
nfs_op = glusterd_check_generate_start_nfs;
+ qd_op = glusterd_check_generate_start_quotad;
if (glusterd_are_all_volumes_stopped ()) {
shd_op = glusterd_shd_stop;
nfs_op = glusterd_nfs_server_stop;
- } else if (glusterd_all_replicate_volumes_stopped()) {
- shd_op = glusterd_shd_stop;
+ qd_op = glusterd_quotad_stop;
+ } else {
+ if (glusterd_all_replicate_volumes_stopped()) {
+ shd_op = glusterd_shd_stop;
+ }
+ if (glusterd_all_volumes_with_quota_stopped ()) {
+ qd_op = glusterd_quotad_stop;
+ }
}
- return glusterd_nodesvcs_batch_op (volinfo, nfs_op, shd_op);
+
+ return glusterd_nodesvcs_batch_op (volinfo, nfs_op, shd_op, qd_op);
}
int
@@ -4005,7 +4394,8 @@ glusterd_nodesvcs_handle_reconfigure (glusterd_volinfo_t *volinfo)
{
return glusterd_nodesvcs_batch_op (volinfo,
glusterd_reconfigure_nfs,
- glusterd_reconfigure_shd);
+ glusterd_reconfigure_shd,
+ glusterd_reconfigure_quotad);
}
int
@@ -4671,7 +5061,7 @@ glusterd_add_brick_to_dict (glusterd_volinfo_t *volinfo,
GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, brickinfo, priv);
- brick_online = glusterd_is_service_running (pidfile, &pid);
+ brick_online = gf_is_service_running (pidfile, &pid);
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "%s.pid", base_key);
@@ -6073,6 +6463,82 @@ out:
return ret;
}
+int
+glusterd_quotad_statedump (char *options, int option_cnt, char **op_errstr)
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char pidfile_path[PATH_MAX] = {0,};
+ char path[PATH_MAX] = {0,};
+ FILE *pidfile = NULL;
+ pid_t pid = -1;
+ char dumpoptions_path[PATH_MAX] = {0,};
+ char *option = NULL;
+ char *tmpptr = NULL;
+ char *dup_options = NULL;
+ char msg[256] = {0,};
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ dup_options = gf_strdup (options);
+ option = strtok_r (dup_options, " ", &tmpptr);
+ if (strcmp (option, "quotad")) {
+ snprintf (msg, sizeof (msg), "for quotad statedump, options "
+ "should be after the key 'quotad'");
+ *op_errstr = gf_strdup (msg);
+ ret = -1;
+ goto out;
+ }
+
+ GLUSTERD_GET_QUOTAD_DIR (path, conf);
+ GLUSTERD_GET_QUOTAD_PIDFILE (pidfile_path, path);
+
+ pidfile = fopen (pidfile_path, "r");
+ if (!pidfile) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to open pidfile: %s",
+ pidfile_path);
+ ret = -1;
+ goto out;
+ }
+
+ ret = fscanf (pidfile, "%d", &pid);
+ if (ret <= 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get pid of quotad "
+ "process");
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (dumpoptions_path, sizeof (dumpoptions_path),
+ DEFAULT_VAR_RUN_DIRECTORY"/glusterdump.%d.options", pid);
+ ret = glusterd_set_dump_options (dumpoptions_path, options, option_cnt);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "error while parsing "
+ "statedump options");
+ ret = -1;
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_INFO, "Performing statedump on quotad with "
+ "pid %d", pid);
+
+ kill (pid, SIGUSR1);
+
+ sleep (1);
+
+ ret = 0;
+out:
+ if (pidfile)
+ fclose (pidfile);
+ unlink (dumpoptions_path);
+ GF_FREE (dup_options);
+ return ret;
+}
+
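glusterd_quotad_statedump() follows the usual statedump recipe: read the daemon's pid from its pidfile, drop a glusterdump.<pid>.options file, and send SIGUSR1. A rough standalone sketch of the trigger half is below; the pidfile path is a placeholder and the options-file step is omitted.

/* Hedged sketch: read a pid from a pidfile and ask the process to
 * dump its state via SIGUSR1. */
#include <stdio.h>
#include <signal.h>
#include <unistd.h>

int
main (void)
{
        const char *pidfile = "/var/lib/glusterd/quotad/run/quotad.pid";
        FILE       *fp      = fopen (pidfile, "r");
        int         pid     = -1;

        if (!fp || fscanf (fp, "%d", &pid) != 1 || pid <= 0) {
                if (fp)
                        fclose (fp);
                fprintf (stderr, "could not read pid from %s\n", pidfile);
                return 1;
        }
        fclose (fp);

        /* the daemon is expected to dump its state on SIGUSR1 */
        if (kill ((pid_t) pid, SIGUSR1) != 0) {
                perror ("kill");
                return 1;
        }
        sleep (1);        /* give it a moment to write the dump file */
        printf ("requested statedump from pid %d\n", pid);
        return 0;
}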
/* Checks if the given peer contains all the bricks belonging to the
* given volume. Returns true if it does else returns false
*/
@@ -6215,7 +6681,7 @@ glusterd_volume_defrag_restart (glusterd_volinfo_t *volinfo, char *op_errstr,
GLUSTERD_GET_DEFRAG_PID_FILE(pidfile, volinfo, priv);
- if (!glusterd_is_service_running (pidfile, &pid)) {
+ if (!gf_is_service_running (pidfile, &pid)) {
glusterd_handle_defrag_start (volinfo, op_errstr, len, cmd,
cbk, volinfo->rebal.op);
} else {
@@ -7363,6 +7829,90 @@ _profile_volume_add_brick_rsp (dict_t *this, char *key, data_t *value,
}
int
+glusterd_volume_quota_copy_to_op_ctx_dict (dict_t *dict, dict_t *rsp_dict)
+{
+ int ret = -1;
+ int i = 0;
+ int count = 0;
+ int rsp_dict_count = 0;
+ char *uuid_str = NULL;
+ char *uuid_str_dup = NULL;
+ char key[256] = {0,};
+ xlator_t *this = NULL;
+ int type = GF_QUOTA_OPTION_TYPE_NONE;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = dict_get_int32 (dict, "type", &type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get quota opcode");
+ goto out;
+ }
+
+ if ((type != GF_QUOTA_OPTION_TYPE_LIMIT_USAGE) &&
+ (type != GF_QUOTA_OPTION_TYPE_REMOVE)) {
+ dict_copy (rsp_dict, dict);
+ ret = 0;
+ goto out;
+ }
+
+ ret = dict_get_int32 (rsp_dict, "count", &rsp_dict_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get the count of "
+ "gfids from the rsp dict");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "count", &count);
+ if (ret)
+ /* The key "count" is absent in op_ctx when this function is
+ * called after self-staging on the originator. This must not
+ * be treated as an error.
+ */
+ gf_log (this->name, GF_LOG_DEBUG, "Failed to get count of gfids"
+ " from req dict. This could be because count is not yet"
+ " copied from rsp_dict into op_ctx");
+
+ for (i = 0; i < rsp_dict_count; i++) {
+ snprintf (key, sizeof(key)-1, "gfid%d", i);
+
+ ret = dict_get_str (rsp_dict, key, &uuid_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get gfid "
+ "from rsp dict");
+ goto out;
+ }
+
+ snprintf (key, sizeof (key)-1, "gfid%d", i + count);
+
+ uuid_str_dup = gf_strdup (uuid_str);
+ if (!uuid_str_dup) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (dict, key, uuid_str_dup);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set gfid "
+ "from rsp dict into req dict");
+ GF_FREE (uuid_str_dup);
+ goto out;
+ }
+ }
+
+ ret = dict_set_int32 (dict, "count", rsp_dict_count + count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set aggregated "
+ "count in req dict");
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
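The aggregation helper above appends the gfid<i> entries from a brick's rsp_dict after the ones already collected in op_ctx and bumps "count" accordingly. The same index-shifting idea, with plain arrays standing in for dict_t and made-up values, looks like this:

/* Append the peer's entries after the local ones and bump the count. */
#include <stdio.h>

int
main (void)
{
        const char *merged[8] = { "local-gfid-0", "local-gfid-1" };
        const char *rsp[]     = { "peer-gfid-0", "peer-gfid-1", "peer-gfid-2" };
        int         count     = 2;       /* entries already in op_ctx */
        int         rsp_count = 3;       /* entries in this rsp_dict */
        int         i         = 0;

        for (i = 0; i < rsp_count && (count + i) < 8; i++)
                merged[count + i] = rsp[i];   /* key would be gfid(count+i) */
        count += rsp_count;

        for (i = 0; i < count; i++)
                printf ("gfid%d=%s\n", i, merged[i]);
        printf ("count=%d\n", count);
        return 0;
}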
+int
glusterd_profile_volume_brick_rsp (void *pending_entry,
dict_t *rsp_dict, dict_t *op_ctx,
char **op_errstr, gd_node_type type)
@@ -8068,3 +8618,232 @@ gd_should_i_start_rebalance (glusterd_volinfo_t *volinfo) {
out:
return retval;
}
+
+int
+glusterd_is_volume_quota_enabled (glusterd_volinfo_t *volinfo)
+{
+ return (glusterd_volinfo_get_boolean (volinfo, VKEY_FEATURES_QUOTA));
+}
+
+int
+glusterd_validate_and_set_gfid (dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr)
+{
+ int ret = -1;
+ int count = 0;
+ int i = 0;
+ int op_code = GF_QUOTA_OPTION_TYPE_NONE;
+ uuid_t uuid1 = {0};
+ uuid_t uuid2 = {0,};
+ char *path = NULL;
+ char key[256] = {0,};
+ char *uuid1_str = NULL;
+ char *uuid1_str_dup = NULL;
+ char *uuid2_str = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = dict_get_int32 (op_ctx, "type", &op_code);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get quota opcode");
+ goto out;
+ }
+
+ if ((op_code != GF_QUOTA_OPTION_TYPE_LIMIT_USAGE) &&
+ (op_code != GF_QUOTA_OPTION_TYPE_REMOVE)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = dict_get_str (op_ctx, "path", &path);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get path");
+ goto out;
+ }
+
+ ret = dict_get_int32 (op_ctx, "count", &count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get count");
+ goto out;
+ }
+
+ /* If count is 0, fail the command with ENOENT.
+ *
+ * If count is 1, treat gfid0 as the gfid on which the operation
+ * is to be performed and resume the command.
+ *
+ * if count > 1, get the 0th gfid from the op_ctx and,
+ * compare it with the remaining 'count -1' gfids.
+ * If they are found to be the same, set gfid0 in the op_ctx and
+ * resume the operation, else error out.
+ */
+
+ if (count == 0) {
+ gf_asprintf (op_errstr, "Failed to get trusted.gfid attribute "
+ "on path %s. Reason : %s", path,
+ strerror (ENOENT));
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof (key) - 1, "gfid%d", 0);
+
+ ret = dict_get_str (op_ctx, key, &uuid1_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get key '%s'",
+ key);
+ goto out;
+ }
+
+ uuid_parse (uuid1_str, uuid1);
+
+ for (i = 1; i < count; i++) {
+ snprintf (key, sizeof (key)-1, "gfid%d", i);
+
+ ret = dict_get_str (op_ctx, key, &uuid2_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get key "
+ "'%s'", key);
+ goto out;
+ }
+
+ uuid_parse (uuid2_str, uuid2);
+
+ if (uuid_compare (uuid1, uuid2)) {
+ gf_asprintf (op_errstr, "gfid mismatch between %s and "
+ "%s for path %s", uuid1_str, uuid2_str,
+ path);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ if (i == count) {
+ uuid1_str_dup = gf_strdup (uuid1_str);
+ if (!uuid1_str_dup) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (req_dict, "gfid", uuid1_str_dup);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set gfid");
+ GF_FREE (uuid1_str_dup);
+ goto out;
+ }
+ } else {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to iterate through %d"
+ " entries in the req dict", count);
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
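glusterd_validate_and_set_gfid() insists that every brick reported the same trusted.gfid for the path before a limit-usage or remove operation proceeds. A small, hypothetical check of that "all GFIDs agree" rule using libuuid (link with -luuid):

/* All-entries-agree check over a list of GFID strings. */
#include <stdio.h>
#include <uuid/uuid.h>

int
main (void)
{
        const char *gfids[] = {
                "3f2504e0-4f89-11d3-9a0c-0305e82c3301",
                "3f2504e0-4f89-11d3-9a0c-0305e82c3301",
                "3f2504e0-4f89-11d3-9a0c-0305e82c3301",
        };
        uuid_t first = {0};
        uuid_t cur   = {0};
        int    i     = 0;

        if (uuid_parse (gfids[0], first))
                return 1;
        for (i = 1; i < 3; i++) {
                if (uuid_parse (gfids[i], cur) ||
                    uuid_compare (first, cur)) {
                        fprintf (stderr, "gfid mismatch at index %d\n", i);
                        return 1;
                }
        }
        puts ("all bricks agree; safe to set gfid0 and continue");
        return 0;
}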
+void
+glusterd_clean_up_quota_store (glusterd_volinfo_t *volinfo)
+{
+ char voldir[PATH_MAX] = {0,};
+ char quota_confpath[PATH_MAX] = {0,};
+ char cksum_path[PATH_MAX] = {0,};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ GLUSTERD_GET_VOLUME_DIR (voldir, volinfo, conf);
+
+ snprintf (quota_confpath, sizeof (quota_confpath), "%s/%s", voldir,
+ GLUSTERD_VOLUME_QUOTA_CONFIG);
+ snprintf (cksum_path, sizeof (cksum_path), "%s/%s", voldir,
+ GLUSTERD_VOL_QUOTA_CKSUM_FILE);
+
+ unlink (quota_confpath);
+ unlink (cksum_path);
+
+ gf_store_handle_destroy (volinfo->quota_conf_shandle);
+ volinfo->quota_conf_shandle = NULL;
+ volinfo->quota_conf_version = 0;
+
+}
+
+#define QUOTA_CONF_HEADER \
+ "GlusterFS Quota conf | version: v%d.%d\n"
+
+int
+glusterd_store_quota_conf_skip_header (xlator_t *this, int fd)
+{
+ char buf[PATH_MAX] = {0,};
+
+ snprintf (buf, sizeof(buf)-1, QUOTA_CONF_HEADER, 1, 1);
+ return gf_skip_header_section (fd, strlen (buf));
+}
+
+int
+glusterd_store_quota_conf_stamp_header (xlator_t *this, int fd)
+{
+ char buf[PATH_MAX] = {0,};
+ int buf_len = 0;
+ ssize_t ret = -1;
+ ssize_t written = 0;
+
+ snprintf (buf, sizeof(buf)-1, QUOTA_CONF_HEADER, 1, 1);
+ buf_len = strlen (buf);
+ for (written = 0; written != buf_len; written += ret) {
+ ret = write (fd, buf + written, buf_len - written);
+ if (ret == -1) {
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
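Because QUOTA_CONF_HEADER is a fixed printf format, the stamp and skip helpers above can agree on the header length without any framing bytes: the writer prints it once, the reader simply seeks past strlen(header). A throwaway demonstration of that pair, with an example path:

/* Stamp a fixed-format header, then skip it by length on the read side. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#define CONF_HEADER "GlusterFS Quota conf | version: v%d.%d\n"

int
main (void)
{
        char hdr[64] = {0};
        int  fd      = -1;

        fd = open ("/tmp/quota.conf.example",
                   O_RDWR | O_CREAT | O_TRUNC, 0600);
        if (fd < 0)
                return 1;

        snprintf (hdr, sizeof (hdr), CONF_HEADER, 1, 1);
        if (write (fd, hdr, strlen (hdr)) < 0) {          /* stamp */
                close (fd);
                return 1;
        }
        /* ... fixed 16-byte gfid records would follow here ... */

        lseek (fd, (off_t) strlen (hdr), SEEK_SET);       /* skip on read */
        printf ("payload starts at offset %zu\n", strlen (hdr));
        close (fd);
        return 0;
}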
+int
+glusterd_remove_auxiliary_mount (char *volname)
+{
+ int ret = -1;
+ runner_t runner = {0,};
+ char mountdir[PATH_MAX] = {0,};
+ char pidfile[PATH_MAX] = {0,};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ GLUSTERFS_GET_AUX_MOUNT_PIDFILE (pidfile, volname);
+
+ if (!gf_is_service_running (pidfile, NULL)) {
+ gf_log (this->name, GF_LOG_DEBUG, "Aux mount of volume %s "
+ "absent, hence returning", volname);
+ return 0;
+ }
+
+ snprintf (mountdir, sizeof (mountdir)-1, "/tmp/%s", volname);
+
+ runinit (&runner);
+ runner_add_args (&runner, "umount",
+
+#if GF_LINUX_HOST_OS
+ "-l",
+#endif
+ mountdir, NULL);
+ ret = runner_run_reuse (&runner);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR, "umount on %s failed, "
+ "reason : %s", mountdir, strerror (errno));
+ runner_end (&runner);
+
+ rmdir (mountdir);
+ return ret;
+}
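glusterd_remove_auxiliary_mount() checks the aux-mount pidfile, lazy-unmounts /tmp/<volname> (the -l flag is Linux-only, hence the GF_LINUX_HOST_OS guard) and removes the directory. A simplified fork/exec equivalent, without the runner framework and with a placeholder mount point:

/* Lazy-umount a directory and remove it once the umount succeeds. */
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int
main (void)
{
        const char *mountdir = "/tmp/myvol";
        int         status   = 0;
        pid_t       child    = -1;

        child = fork ();
        if (child == 0) {
                execlp ("umount", "umount", "-l", mountdir, (char *) NULL);
                _exit (127);                      /* exec failed */
        }
        if (child < 0)
                return 1;

        waitpid (child, &status, 0);
        if (WIFEXITED (status) && WEXITSTATUS (status) == 0)
                rmdir (mountdir);                 /* only once unmounted */
        else
                fprintf (stderr, "umount of %s failed\n", mountdir);
        return 0;
}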
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 5b0cfca7b55..9907a03d490 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -158,16 +158,14 @@ int32_t
glusterd_compare_friend_data (dict_t *vols, int32_t *status, char *hostname);
int
-glusterd_volume_compute_cksum (glusterd_volinfo_t *volinfo);
+glusterd_compute_cksum (glusterd_volinfo_t *volinfo,
+ gf_boolean_t is_quota_conf);
void
glusterd_get_nodesvc_volfile (char *server, char *workdir,
char *volfile, size_t len);
gf_boolean_t
-glusterd_is_service_running (char *pidfile, int *pid);
-
-gf_boolean_t
glusterd_is_nodesvc_running ();
gf_boolean_t
@@ -188,6 +186,12 @@ glusterd_shd_start ();
int32_t
glusterd_shd_stop ();
+int32_t
+glusterd_quotad_start ();
+
+int32_t
+glusterd_quotad_stop ();
+
void
glusterd_set_socket_filepath (char *sock_filepath, char *sockpath, size_t len);
@@ -228,6 +232,9 @@ int
glusterd_check_generate_start_shd (void);
int
+glusterd_check_generate_start_quotad (void);
+
+int
glusterd_nodesvcs_handle_graph_change (glusterd_volinfo_t *volinfo);
int
@@ -401,8 +408,13 @@ glusterd_brick_statedump (glusterd_volinfo_t *volinfo,
char *options, int option_cnt, char **op_errstr);
int
glusterd_nfs_statedump (char *options, int option_cnt, char **op_errstr);
+
+int
+glusterd_quotad_statedump (char *options, int option_cnt, char **op_errstr);
+
gf_boolean_t
glusterd_is_volume_replicate (glusterd_volinfo_t *volinfo);
+
gf_boolean_t
glusterd_is_brick_decommissioned (glusterd_volinfo_t *volinfo, char *hostname,
char *path);
@@ -493,6 +505,11 @@ int
glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict);
int
glusterd_volume_heal_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict);
+
+int32_t
+glusterd_check_if_quota_trans_enabled (glusterd_volinfo_t *volinfo);
+int
+glusterd_volume_quota_copy_to_op_ctx_dict (dict_t *aggr, dict_t *rsp);
int
_profile_volume_add_brick_rsp (dict_t *this, char *key, data_t *value,
void *data);
@@ -537,6 +554,10 @@ int
glusterd_generate_and_set_task_id (dict_t *dict, char *key);
int
+glusterd_validate_and_set_gfid (dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr);
+
+int
glusterd_copy_uuid_to_dict (uuid_t uuid, dict_t *dict, char *key);
gf_boolean_t
@@ -576,4 +597,26 @@ glusterd_is_status_tasks_op (glusterd_op_t op, dict_t *dict);
gf_boolean_t
gd_should_i_start_rebalance (glusterd_volinfo_t *volinfo);
+
+int
+glusterd_is_volume_quota_enabled (glusterd_volinfo_t *volinfo);
+
+gf_boolean_t
+glusterd_all_volumes_with_quota_stopped ();
+
+int
+glusterd_reconfigure_quotad ();
+
+void
+glusterd_clean_up_quota_store (glusterd_volinfo_t *volinfo);
+
+int
+glusterd_store_quota_conf_skip_header (xlator_t *this, int fd);
+
+int
+glusterd_store_quota_conf_stamp_header (xlator_t *this, int fd);
+
+int
+glusterd_remove_auxiliary_mount (char *volname);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index da8ace953cf..dcff8c30517 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -1616,7 +1616,21 @@ server_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (ret)
return -1;
- if (dict_get_str_boolean (set_dict, "features.read-only", 0) &&
+ xl = volgen_graph_add (graph, "features/quota", volname);
+ if (!xl)
+ return -1;
+ ret = xlator_set_option (xl, "volume-uuid", volname);
+ if (ret)
+ return -1;
+
+ ret = glusterd_volinfo_get (volinfo, VKEY_FEATURES_QUOTA, &value);
+ if (value) {
+ ret = xlator_set_option (xl, "server-quota", value);
+ if (ret)
+ return -1;
+ }
+
+ if (dict_get_str_boolean (set_dict, "features.read-only", 0) &&
dict_get_str_boolean (set_dict, "features.worm",0)) {
gf_log (THIS->name, GF_LOG_ERROR,
"read-only and worm cannot be set together");
@@ -2331,13 +2345,15 @@ out:
static int
volgen_graph_build_dht_cluster (volgen_graph_t *graph,
- glusterd_volinfo_t *volinfo, size_t child_count)
+ glusterd_volinfo_t *volinfo, size_t child_count,
+ gf_boolean_t is_quotad)
{
int32_t clusters = 0;
int ret = -1;
char *decommissioned_children = NULL;
xlator_t *dht = NULL;
char *voltype = "cluster/distribute";
+ char *name_fmt = NULL;
/* NUFA and Switch section */
if (dict_get_str_boolean (volinfo->dict, "cluster.nufa", 0) &&
@@ -2356,9 +2372,14 @@ volgen_graph_build_dht_cluster (volgen_graph_t *graph,
if (dict_get_str_boolean (volinfo->dict, "cluster.switch", 0))
voltype = "cluster/switch";
+ if (is_quotad)
+ name_fmt = "%s";
+ else
+ name_fmt = "%s-dht";
+
clusters = volgen_graph_build_clusters (graph, volinfo,
voltype,
- "%s-dht",
+ name_fmt,
child_count,
child_count);
if (clusters < 0)
@@ -2383,7 +2404,8 @@ out:
static int
volume_volgen_graph_build_clusters (volgen_graph_t *graph,
- glusterd_volinfo_t *volinfo)
+ glusterd_volinfo_t *volinfo,
+ gf_boolean_t is_quotad)
{
char *replicate_args[] = {"cluster/replicate",
"%s-replicate-%d"};
@@ -2456,8 +2478,8 @@ build_distribute:
}
ret = volgen_graph_build_dht_cluster (graph, volinfo,
- dist_count);
- if (ret == -1)
+ dist_count, is_quotad);
+ if (ret)
goto out;
ret = 0;
@@ -2492,16 +2514,19 @@ static int
client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
dict_t *set_dict, void *param)
{
- int ret = 0;
- xlator_t *xl = NULL;
- char *volname = NULL;
+ int ret = 0;
+ xlator_t *xl = NULL;
+ char *volname = NULL;
+ glusterd_conf_t *conf = THIS->private;
+
+ GF_ASSERT (conf);
volname = volinfo->volname;
ret = volgen_graph_build_clients (graph, volinfo, set_dict, param);
if (ret)
goto out;
- ret = volume_volgen_graph_build_clusters (graph, volinfo);
+ ret = volume_volgen_graph_build_clusters (graph, volinfo, _gf_false);
if (ret == -1)
goto out;
@@ -2530,15 +2555,18 @@ client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
}
}
- ret = glusterd_volinfo_get_boolean (volinfo, VKEY_FEATURES_QUOTA);
- if (ret == -1)
- goto out;
- if (ret) {
- xl = volgen_graph_add (graph, "features/quota", volname);
-
- if (!xl) {
- ret = -1;
+ if (conf->op_version == GD_OP_VERSION_MIN) {
+ ret = glusterd_volinfo_get_boolean (volinfo,
+ VKEY_FEATURES_QUOTA);
+ if (ret == -1)
goto out;
+ if (ret) {
+ xl = volgen_graph_add (graph, "features/quota",
+ volname);
+ if (!xl) {
+ ret = -1;
+ goto out;
+ }
}
}
@@ -3096,9 +3124,6 @@ build_nfs_graph (volgen_graph_t *graph, dict_t *mod_dict)
return ret;
}
-
-
-
/****************************
*
* Volume generation interface
@@ -3185,7 +3210,100 @@ glusterd_generate_brick_volfile (glusterd_volinfo_t *volinfo,
return ret;
}
+static int
+build_quotad_graph (volgen_graph_t *graph, dict_t *mod_dict)
+{
+ volgen_graph_t cgraph = {0};
+ glusterd_volinfo_t *voliter = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ dict_t *set_dict = NULL;
+ int ret = 0;
+ xlator_t *quotad_xl = NULL;
+ char *skey = NULL;
+
+ this = THIS;
+ priv = this->private;
+
+ set_dict = dict_new ();
+ if (!set_dict) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ quotad_xl = volgen_graph_add_as (graph, "features/quotad", "quotad");
+ if (!quotad_xl) {
+ ret = -1;
+ goto out;
+ }
+
+ list_for_each_entry (voliter, &priv->volumes, vol_list) {
+ if (voliter->status != GLUSTERD_STATUS_STARTED)
+ continue;
+
+ if (1 != glusterd_is_volume_quota_enabled (voliter))
+ continue;
+
+ ret = dict_set_uint32 (set_dict, "trusted-client",
+ GF_CLIENT_TRUSTED);
+ if (ret)
+ goto out;
+
+ dict_copy (voliter->dict, set_dict);
+ if (mod_dict)
+ dict_copy (mod_dict, set_dict);
+
+ ret = gf_asprintf(&skey, "%s.volume-id", voliter->volname);
+ if (ret == -1) {
+ gf_log("", GF_LOG_ERROR, "Out of memory");
+ goto out;
+ }
+ ret = xlator_set_option(quotad_xl, skey, voliter->volname);
+ GF_FREE(skey);
+ if (ret)
+ goto out;
+
+ memset (&cgraph, 0, sizeof (cgraph));
+ ret = volgen_graph_build_clients (&cgraph, voliter, set_dict,
+ NULL);
+ if (ret)
+ goto out;
+
+ ret = volume_volgen_graph_build_clusters (&cgraph, voliter,
+ _gf_true);
+ if (ret) {
+ ret = -1;
+ goto out;
+ }
+
+ if (mod_dict) {
+ dict_copy (mod_dict, set_dict);
+ ret = volgen_graph_set_options_generic (&cgraph, set_dict,
+ voliter,
+ basic_option_handler);
+ } else {
+ ret = volgen_graph_set_options_generic (&cgraph,
+ voliter->dict,
+ voliter,
+ basic_option_handler);
+ }
+ if (ret)
+ goto out;
+
+ ret = volgen_graph_merge_sub (graph, &cgraph, 1);
+ if (ret)
+ goto out;
+
+ ret = dict_reset (set_dict);
+ if (ret)
+ goto out;
+ }
+out:
+ if (set_dict)
+ dict_unref (set_dict);
+ return ret;
+}
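build_quotad_graph() adds a single features/quotad xlator and then, for every started volume that has quota enabled, sets a <volname>.volume-id option and merges that volume's client graph underneath it. The loop below imitates the same filtering and option emission with invented structs, purely to show the shape of the generated volfile; it is not the volgen API.

/* Faux quotad volfile build loop over a handful of volumes. */
#include <stdio.h>

struct vol {
        const char *name;
        int         started;
        int         quota_on;
};

int
main (void)
{
        struct vol vols[] = {
                { "vol-a", 1, 1 },
                { "vol-b", 1, 0 },
                { "vol-c", 0, 1 },
        };
        int i = 0;

        puts ("volume quotad            # single features/quotad xlator");
        for (i = 0; i < 3; i++) {
                if (!vols[i].started || !vols[i].quota_on)
                        continue;        /* quotad only serves these volumes */
                printf ("    option %s.volume-id %s\n",
                        vols[i].name, vols[i].name);
                printf ("    # client graph of %s merged as a subvolume\n",
                        vols[i].name);
        }
        return 0;
}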
static void
get_vol_tstamp_file (char *filename, glusterd_volinfo_t *volinfo)
@@ -3453,105 +3571,117 @@ out:
}
int
-glusterd_check_nfs_topology_identical (gf_boolean_t *identical)
+glusterd_check_nfs_volfile_identical (gf_boolean_t *identical)
{
char nfsvol[PATH_MAX] = {0,};
char tmpnfsvol[PATH_MAX] = {0,};
glusterd_conf_t *conf = NULL;
- xlator_t *this = THIS;
+ xlator_t *this = NULL;
int ret = -1;
- int tmpclean = 0;
- int tmpfd = -1;
+ int need_unlink = 0;
+ int tmp_fd = -1;
- if ((!identical) || (!this) || (!this->private))
- goto out;
+ this = THIS;
- conf = (glusterd_conf_t *) this->private;
+ GF_ASSERT (this);
+ GF_ASSERT (identical);
+ conf = this->private;
- /* Fetch the original NFS volfile */
glusterd_get_nodesvc_volfile ("nfs", conf->workdir,
nfsvol, sizeof (nfsvol));
- /* Create the temporary NFS volfile */
snprintf (tmpnfsvol, sizeof (tmpnfsvol), "/tmp/gnfs-XXXXXX");
- tmpfd = mkstemp (tmpnfsvol);
- if (tmpfd < 0) {
- gf_log (this->name, GF_LOG_WARNING,
- "Unable to create temp file %s: (%s)",
- tmpnfsvol, strerror (errno));
+
+ tmp_fd = mkstemp (tmpnfsvol);
+ if (tmp_fd < 0) {
+ gf_log ("", GF_LOG_WARNING, "Unable to create temp file %s: "
+ "(%s)", tmpnfsvol, strerror (errno));
goto out;
}
- tmpclean = 1; /* SET the flag to unlink() tmpfile */
+ need_unlink = 1;
ret = glusterd_create_global_volfile (build_nfs_graph,
tmpnfsvol, NULL);
if (ret)
goto out;
- /* Compare the topology of volfiles */
- ret = glusterd_check_topology_identical (nfsvol, tmpnfsvol,
- identical);
+ ret = glusterd_check_files_identical (nfsvol, tmpnfsvol,
+ identical);
+ if (ret)
+ goto out;
+
out:
- if (tmpfd >= 0)
- close (tmpfd);
- if (tmpclean)
+ if (need_unlink)
unlink (tmpnfsvol);
+
+ if (tmp_fd >= 0)
+ close (tmp_fd);
+
return ret;
}
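Both NFS check functions above share one pattern: regenerate the volfile into a mkstemp() temp file and compare it against the live one, either byte-for-byte or by topology. A byte-for-byte comparison helper in the same spirit follows; the file names are placeholders and the helper is not the GlusterFS one.

/* Compare two files byte-for-byte to decide whether a regenerated
 * volfile differs from the one on disk. */
#include <stdio.h>

static int
files_identical (const char *a, const char *b, int *same)
{
        FILE *fa = fopen (a, "rb");
        FILE *fb = fopen (b, "rb");
        int   ca = 0;
        int   cb = 0;

        if (!fa || !fb) {
                if (fa) fclose (fa);
                if (fb) fclose (fb);
                return -1;
        }
        do {
                ca = fgetc (fa);
                cb = fgetc (fb);
        } while (ca == cb && ca != EOF);

        *same = (ca == cb);
        fclose (fa);
        fclose (fb);
        return 0;
}

int
main (void)
{
        int same = 0;

        if (files_identical ("/tmp/live.vol", "/tmp/candidate.vol", &same))
                return 1;
        printf ("volfiles identical: %s\n", same ? "yes" : "no");
        return 0;
}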
int
-glusterd_check_nfs_volfile_identical (gf_boolean_t *identical)
+glusterd_check_nfs_topology_identical (gf_boolean_t *identical)
{
char nfsvol[PATH_MAX] = {0,};
char tmpnfsvol[PATH_MAX] = {0,};
glusterd_conf_t *conf = NULL;
- xlator_t *this = NULL;
+ xlator_t *this = THIS;
int ret = -1;
- int need_unlink = 0;
- int tmp_fd = -1;
-
- this = THIS;
+ int tmpclean = 0;
+ int tmpfd = -1;
- GF_ASSERT (this);
- GF_ASSERT (identical);
+ if ((!identical) || (!this) || (!this->private))
+ goto out;
- conf = this->private;
+ conf = (glusterd_conf_t *) this->private;
+ /* Fetch the original NFS volfile */
glusterd_get_nodesvc_volfile ("nfs", conf->workdir,
nfsvol, sizeof (nfsvol));
+ /* Create the temporary NFS volfile */
snprintf (tmpnfsvol, sizeof (tmpnfsvol), "/tmp/gnfs-XXXXXX");
-
- tmp_fd = mkstemp (tmpnfsvol);
- if (tmp_fd < 0) {
- gf_log ("", GF_LOG_WARNING, "Unable to create temp file %s: "
- "(%s)", tmpnfsvol, strerror (errno));
+ tmpfd = mkstemp (tmpnfsvol);
+ if (tmpfd < 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Unable to create temp file %s: (%s)",
+ tmpnfsvol, strerror (errno));
goto out;
}
- need_unlink = 1;
+ tmpclean = 1; /* SET the flag to unlink() tmpfile */
ret = glusterd_create_global_volfile (build_nfs_graph,
tmpnfsvol, NULL);
if (ret)
goto out;
- ret = glusterd_check_files_identical (nfsvol, tmpnfsvol,
- identical);
- if (ret)
- goto out;
-
+ /* Compare the topology of volfiles */
+ ret = glusterd_check_topology_identical (nfsvol, tmpnfsvol,
+ identical);
out:
- if (need_unlink)
+ if (tmpfd >= 0)
+ close (tmpfd);
+ if (tmpclean)
unlink (tmpnfsvol);
+ return ret;
+}
- if (tmp_fd >= 0)
- close (tmp_fd);
+int
+glusterd_create_quotad_volfile ()
+{
+ char filepath[PATH_MAX] = {0,};
+ glusterd_conf_t *conf = THIS->private;
- return ret;
+ glusterd_get_nodesvc_volfile ("quotad", conf->workdir,
+ filepath, sizeof (filepath));
+ return glusterd_create_global_volfile (build_quotad_graph,
+ filepath, NULL);
}
+
int
glusterd_delete_volfile (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo)
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.h b/xlators/mgmt/glusterd/src/glusterd-volgen.h
index 2d31c4040a4..1683f905022 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.h
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.h
@@ -22,6 +22,7 @@
#define VKEY_DIAG_CNT_FOP_HITS "diagnostics.count-fop-hits"
#define VKEY_DIAG_LAT_MEASUREMENT "diagnostics.latency-measurement"
#define VKEY_FEATURES_LIMIT_USAGE "features.limit-usage"
+#define VKEY_FEATURES_SOFT_LIMIT "features.soft-limit"
#define VKEY_MARKER_XTIME GEOREP".indexing"
#define VKEY_MARKER_XTIME_FORCE GEOREP".ignore-pid-check"
#define VKEY_CHANGELOG "changelog.changelog"
@@ -121,6 +122,7 @@ void glusterd_get_shd_filepath (char *filename);
int glusterd_create_nfs_volfile ();
int glusterd_create_shd_volfile ();
+int glusterd_create_quotad_volfile ();
int glusterd_delete_volfile (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo);
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index ad33227019c..41555230e92 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -527,9 +527,12 @@ __glusterd_handle_cli_statedump_volume (rpcsvc_request_t *req)
glusterd_op_t cli_op = GD_OP_STATEDUMP_VOLUME;
char err_str[2048] = {0,};
xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (req);
@@ -578,6 +581,14 @@ __glusterd_handle_cli_statedump_volume (rpcsvc_request_t *req)
goto out;
}
+ if (priv->op_version == GD_OP_VERSION_MIN &&
+ strstr (options, "quotad")) {
+ snprintf (err_str, sizeof (err_str), "The cluster is operating "
+ "at op-version 1. Taking quotad's statedump is "
+ "disallowed in this state");
+ ret = -1;
+ goto out;
+ }
gf_log (this->name, GF_LOG_INFO, "Received statedump request for "
"volume %s with options %s", volname, options);
@@ -1314,6 +1325,13 @@ glusterd_op_stage_statedump_volume (dict_t *dict, char **op_errstr)
gf_boolean_t is_running = _gf_false;
glusterd_volinfo_t *volinfo = NULL;
char msg[2408] = {0,};
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
ret = glusterd_op_statedump_volume_args_get (dict, &volname, &options,
&option_cnt);
@@ -1322,10 +1340,7 @@ glusterd_op_stage_statedump_volume (dict_t *dict, char **op_errstr)
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- snprintf (msg, sizeof(msg), "Volume %s does not exist",
- volname);
- gf_log ("", GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
+ snprintf (msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
goto out;
}
@@ -1335,16 +1350,31 @@ glusterd_op_stage_statedump_volume (dict_t *dict, char **op_errstr)
is_running = glusterd_is_volume_started (volinfo);
if (!is_running) {
- snprintf (msg, sizeof(msg), "Volume %s is not in a started"
+ snprintf (msg, sizeof(msg), "Volume %s is not in the started"
" state", volname);
- gf_log ("", GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
ret = -1;
goto out;
}
+ if (priv->op_version == GD_OP_VERSION_MIN &&
+ strstr (options, "quotad")) {
+ snprintf (msg, sizeof (msg), "The cluster is operating "
+ "at op-version 1. Taking quotad's statedump is "
+ "disallowed in this state");
+ ret = -1;
+ goto out;
+ }
+ if ((strstr (options, "quotad")) &&
+ (!glusterd_is_volume_quota_enabled (volinfo))) {
+ snprintf (msg, sizeof (msg), "Quota is not enabled on "
+ "volume %s", volname);
+ ret = -1;
+ goto out;
+ }
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ if (ret && msg[0] != '\0')
+ *op_errstr = gf_strdup (msg);
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
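The staging changes gate quotad statedumps twice: they are refused while the cluster still runs at op-version 1, and refused when quota is not enabled on the volume. Reduced to a standalone function with made-up inputs, the checks look like this:

/* Minimal illustration of the quotad statedump staging checks. */
#include <stdio.h>
#include <string.h>

#define OP_VERSION_MIN 1

static int
stage_statedump (int op_version, int quota_enabled, const char *options)
{
        if (!strstr (options, "quotad"))
                return 0;                       /* not a quotad dump */
        if (op_version == OP_VERSION_MIN) {
                fprintf (stderr, "cluster at op-version 1: disallowed\n");
                return -1;
        }
        if (!quota_enabled) {
                fprintf (stderr, "quota is not enabled on this volume\n");
                return -1;
        }
        return 0;
}

int
main (void)
{
        printf ("%d\n", stage_statedump (1, 1, "quotad mem"));  /* -1 */
        printf ("%d\n", stage_statedump (2, 1, "quotad mem"));  /*  0 */
        return 0;
}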
@@ -1785,6 +1815,10 @@ glusterd_op_delete_volume (dict_t *dict)
goto out;
}
+ ret = glusterd_remove_auxiliary_mount (volname);
+ if (ret)
+ goto out;
+
ret = glusterd_delete_volume (volinfo);
out:
gf_log (this->name, GF_LOG_DEBUG, "returning %d", ret);
@@ -1823,6 +1857,12 @@ glusterd_op_statedump_volume (dict_t *dict, char **op_errstr)
ret = glusterd_nfs_statedump (options, option_cnt, op_errstr);
if (ret)
goto out;
+
+ } else if (strstr (options, "quotad")) {
+ ret = glusterd_quotad_statedump (options, option_cnt,
+ op_errstr);
+ if (ret)
+ goto out;
} else {
list_for_each_entry (brickinfo, &volinfo->bricks,
brick_list) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 665a8b29859..131f96ce667 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -1043,29 +1043,51 @@ struct volopt_map_entry glusterd_volopt_map[] = {
#endif
/* Quota xlator options */
- { .key = VKEY_FEATURES_LIMIT_USAGE,
- .voltype = "features/quota",
- .option = "limit-set",
- .type = NO_DOC,
- .op_version = 1,
- .flags = OPT_FLAG_CLIENT_OPT
- },
- { .key = "features.quota-timeout",
- .voltype = "features/quota",
- .option = "timeout",
- .value = "0",
- .op_version = 1,
- .validate_fn = validate_quota,
- .flags = OPT_FLAG_CLIENT_OPT
- },
- { .key = "features.quota-deem-statfs",
- .voltype = "features/quota",
- .option = "deem-statfs",
- .value = "off",
- .type = DOC,
- .op_version = 3,
- .validate_fn = validate_quota,
- .flags = OPT_FLAG_CLIENT_OPT
+ { .key = VKEY_FEATURES_LIMIT_USAGE,
+ .voltype = "features/quota",
+ .option = "limit-set",
+ .type = NO_DOC,
+ .op_version = 1,
+ },
+ {
+ .key = "features.quota-timeout",
+ .voltype = "features/quota",
+ .option = "timeout",
+ .value = "0",
+ .op_version = 1,
+ .validate_fn = validate_quota,
+ },
+ { .key = "features.default-soft-limit",
+ .voltype = "features/quota",
+ .option = "default-soft-limit",
+ .type = NO_DOC,
+ .op_version = 3,
+ },
+ { .key = "features.soft-timeout",
+ .voltype = "features/quota",
+ .option = "soft-timeout",
+ .type = NO_DOC,
+ .op_version = 3,
+ },
+ { .key = "features.hard-timeout",
+ .voltype = "features/quota",
+ .option = "hard-timeout",
+ .type = NO_DOC,
+ .op_version = 3,
+ },
+ { .key = "features.alert-time",
+ .voltype = "features/quota",
+ .option = "alert-time",
+ .type = NO_DOC,
+ .op_version = 3,
+ },
+ { .key = "features.quota-deem-statfs",
+ .voltype = "features/quota",
+ .option = "deem-statfs",
+ .value = "off",
+ .type = DOC,
+ .op_version = 2,
+ .validate_fn = validate_quota,
},
/* Marker xlator options */
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index e4c4caee45c..c2be2c9da59 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -1189,6 +1189,15 @@ init (xlator_t *this)
exit (1);
}
+ snprintf (storedir, PATH_MAX, "%s/quotad", workdir);
+ ret = mkdir (storedir, 0777);
+ if ((-1 == ret) && (errno != EEXIST)) {
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "Unable to create quotad directory %s"
+ " ,errno = %d", storedir, errno);
+ exit (1);
+ }
+
snprintf (storedir, PATH_MAX, "%s/groups", workdir);
ret = mkdir (storedir, 0777);
if ((-1 == ret) && (errno != EEXIST)) {
@@ -1253,12 +1262,14 @@ init (xlator_t *this)
conf = GF_CALLOC (1, sizeof (glusterd_conf_t),
gf_gld_mt_glusterd_conf_t);
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- conf->shd = GF_CALLOC (1, sizeof (nodesrv_t),
- gf_gld_mt_nodesrv_t);
+
+ conf->shd = GF_CALLOC (1, sizeof (nodesrv_t), gf_gld_mt_nodesrv_t);
GF_VALIDATE_OR_GOTO(this->name, conf->shd, out);
- conf->nfs = GF_CALLOC (1, sizeof (nodesrv_t),
- gf_gld_mt_nodesrv_t);
+ conf->nfs = GF_CALLOC (1, sizeof (nodesrv_t), gf_gld_mt_nodesrv_t);
GF_VALIDATE_OR_GOTO(this->name, conf->nfs, out);
+ conf->quotad = GF_CALLOC (1, sizeof (nodesrv_t),
+ gf_gld_mt_nodesrv_t);
+ GF_VALIDATE_OR_GOTO(this->name, conf->quotad, out);
INIT_LIST_HEAD (&conf->peers);
INIT_LIST_HEAD (&conf->volumes);
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index b081ec32eb4..52f8f26b4b6 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -122,39 +122,41 @@ typedef struct {
} gd_global_opts_t;
typedef struct {
- struct _volfile_ctx *volfile;
- pthread_mutex_t mutex;
- struct list_head peers;
- struct list_head xaction_peers;
- gf_boolean_t verify_volfile_checksum;
- gf_boolean_t trace;
- uuid_t uuid;
- char workdir[PATH_MAX];
- rpcsvc_t *rpc;
- nodesrv_t *shd;
- nodesrv_t *nfs;
- struct pmap_registry *pmap;
- struct list_head volumes;
- pthread_mutex_t xprt_lock;
- struct list_head xprt_list;
- gf_store_handle_t *handle;
- gf_timer_t *timer;
- glusterd_sm_tr_log_t op_sm_log;
- struct rpc_clnt_program *gfs_mgmt;
-
- struct list_head mount_specs;
- gf_boolean_t valgrind;
- pthread_t brick_thread;
- void *hooks_priv;
+ struct _volfile_ctx *volfile;
+ pthread_mutex_t mutex;
+ struct list_head peers;
+ struct list_head xaction_peers;
+ gf_boolean_t verify_volfile_checksum;
+ gf_boolean_t trace;
+ uuid_t uuid;
+ char workdir[PATH_MAX];
+ rpcsvc_t *rpc;
+ nodesrv_t *shd;
+ nodesrv_t *nfs;
+ nodesrv_t *quotad;
+ struct pmap_registry *pmap;
+ struct list_head volumes;
+ pthread_mutex_t xprt_lock;
+ struct list_head xprt_list;
+ gf_store_handle_t *handle;
+ gf_timer_t *timer;
+ glusterd_sm_tr_log_t op_sm_log;
+ struct rpc_clnt_program *gfs_mgmt;
+
+ struct list_head mount_specs;
+ gf_boolean_t valgrind;
+ pthread_t brick_thread;
+ void *hooks_priv;
+
/* need for proper handshake_t */
- int op_version; /* Starts with 1 for 3.3.0 */
- xlator_t *xl; /* Should be set to 'THIS' before creating thread */
- gf_boolean_t pending_quorum_action;
- dict_t *opts;
- synclock_t big_lock;
- gf_boolean_t restart_done;
- rpcsvc_t *uds_rpc; /* RPCSVC for the unix domain socket */
- uint32_t base_port;
+ int op_version; /* Starts with 1 for 3.3.0 */
+ xlator_t *xl; /* Should be set to 'THIS' before creating thread */
+ gf_boolean_t pending_quorum_action;
+ dict_t *opts;
+ synclock_t big_lock;
+ gf_boolean_t restart_done;
+ rpcsvc_t *uds_rpc; /* RPCSVC for the unix domain socket */
+ uint32_t base_port;
} glusterd_conf_t;
@@ -281,10 +283,11 @@ struct glusterd_volinfo_ {
distribute volume */
int dist_leaf_count; /* Number of bricks in one
distribute subvolume */
- int port;
- gf_store_handle_t *shandle;
- gf_store_handle_t *rb_shandle;
- gf_store_handle_t *node_state_shandle;
+ int port;
+ gf_store_handle_t *shandle;
+ gf_store_handle_t *rb_shandle;
+ gf_store_handle_t *node_state_shandle;
+ gf_store_handle_t *quota_conf_shandle;
/* Defrag/rebalance related */
glusterd_rebalance_t rebal;
@@ -292,10 +295,12 @@ struct glusterd_volinfo_ {
/* Replace brick status */
glusterd_replace_brick_t rep_brick;
- int version;
- uint32_t cksum;
- gf_transport_type transport_type;
- gf_transport_type nfs_transport_type;
+ int version;
+ uint32_t quota_conf_version;
+ uint32_t cksum;
+ uint32_t quota_conf_cksum;
+ gf_transport_type transport_type;
+ gf_transport_type nfs_transport_type;
dict_t *dict;
@@ -321,6 +326,7 @@ typedef enum gd_node_type_ {
GD_NODE_SHD,
GD_NODE_REBALANCE,
GD_NODE_NFS,
+ GD_NODE_QUOTAD,
} gd_node_type;
typedef struct glusterd_pending_node_ {
@@ -351,12 +357,14 @@ enum glusterd_vol_comp_status_ {
#define GLUSTERD_DEFAULT_WORKDIR "/var/lib/glusterd"
#define GLUSTERD_DEFAULT_PORT GF_DEFAULT_BASE_PORT
#define GLUSTERD_INFO_FILE "glusterd.info"
+#define GLUSTERD_VOLUME_QUOTA_CONFIG "quota.conf"
#define GLUSTERD_VOLUME_DIR_PREFIX "vols"
#define GLUSTERD_PEER_DIR_PREFIX "peers"
#define GLUSTERD_VOLUME_INFO_FILE "info"
#define GLUSTERD_VOLUME_RBSTATE_FILE "rbstate"
#define GLUSTERD_BRICK_INFO_DIR "bricks"
#define GLUSTERD_CKSUM_FILE "cksum"
+#define GLUSTERD_VOL_QUOTA_CKSUM_FILE "quota.cksum"
#define GLUSTERD_TRASH "trash"
#define GLUSTERD_NODE_STATE_FILE "node_state.info"
@@ -383,6 +391,9 @@ typedef ssize_t (*gd_serialize_t) (struct iovec outmsg, void *args);
#define GLUSTERD_GET_NFS_DIR(path, priv) \
snprintf (path, PATH_MAX, "%s/nfs", priv->workdir);
+#define GLUSTERD_GET_QUOTAD_DIR(path, priv) \
+ snprintf (path, PATH_MAX, "%s/quotad", priv->workdir);
+
#define GLUSTERD_REMOVE_SLASH_FROM_PATH(path,string) do { \
int i = 0; \
for (i = 1; i < strlen (path); i++) { \
@@ -406,6 +417,11 @@ typedef ssize_t (*gd_serialize_t) (struct iovec outmsg, void *args);
nfspath); \
}
+#define GLUSTERD_GET_QUOTAD_PIDFILE(pidfile,quotadpath) { \
+ snprintf (pidfile, PATH_MAX, "%s/run/quotad.pid", \
+ quotadpath); \
+ }
+
#define GLUSTERD_STACK_DESTROY(frame) do {\
frame->local = NULL; \
STACK_DESTROY (frame->root); \
@@ -431,6 +447,10 @@ typedef ssize_t (*gd_serialize_t) (struct iovec outmsg, void *args);
uuid_utoa(MY_UUID)); \
} while (0)
+#define GLUSTERFS_GET_AUX_MOUNT_PIDFILE(pidfile, volname) { \
+ snprintf (pidfile, PATH_MAX-1, \
+ DEFAULT_VAR_RUN_DIRECTORY"/%s.pid", volname); \
+ }
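The new path macros are plain snprintf wrappers over the glusterd workdir layout (<workdir>/quotad/run/quotad.pid for the daemon, <rundir>/<volname>.pid for the aux mount). A two-line usage sketch, with the directory layout shown only as an example:

/* Usage sketch of an snprintf-based pidfile path macro. */
#include <stdio.h>
#include <limits.h>

#define GET_QUOTAD_PIDFILE(buf, workdir) \
        snprintf (buf, PATH_MAX, "%s/quotad/run/quotad.pid", workdir)

int
main (void)
{
        char pidfile[PATH_MAX] = {0};

        GET_QUOTAD_PIDFILE (pidfile, "/var/lib/glusterd");
        printf ("%s\n", pidfile);
        return 0;
}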
int glusterd_uuid_init();
@@ -720,7 +740,7 @@ int glusterd_op_sys_exec (dict_t *dict, char **op_errstr, dict_t *rsp_dict);
int glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr);
int glusterd_op_gsync_create (dict_t *dict, char **op_errstr, dict_t *rsp_dict);
int glusterd_op_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict);
-int glusterd_op_stage_quota (dict_t *dict, char **op_errstr);
+int glusterd_op_stage_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict);
int glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
dict_t *rsp_dict);
int glusterd_op_replace_brick (dict_t *dict, dict_t *rsp_dict);