Diffstat (limited to 'xlators/mgmt')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-bitrot.c          2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-brick-ops.c       2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-geo-rep.c         2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c         6
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.c           6
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c           2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rebalance.c       2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c  8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c        16
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-tier.c            2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c           14
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.c          2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c      2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-set.c      8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c                 4
15 files changed, 39 insertions, 39 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitrot.c b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
index 8c5ddfd7896..167c434dffc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitrot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
@@ -475,7 +475,7 @@ glusterd_should_i_stop_bitd ()
return stopped;
}
- /* Before stoping bitrot/scrubber daemon check
+ /* Before stopping bitrot/scrubber daemon check
* other volume also whether respective volume
* host a brick from this node or not.*/
continue;
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 3362f7323c9..c3ec249e6ba 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -1749,7 +1749,7 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
}
/* op-version check for replica 2 to arbiter conversion. If we
- * dont have this check, an older peer added as arbiter brick
+ * don't have this check, an older peer added as arbiter brick
* will not have the arbiter xlator in its volfile. */
if ((conf->op_version < GD_OP_VERSION_3_8_0) &&
(arbiter_count == 1) && (replica_count == 3)) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
index 5a5d4ec2540..6761277be05 100644
--- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
@@ -4070,7 +4070,7 @@ out:
/*
* glusterd_gsync_op_already_set:
- * This funcion checks whether the op_value is same as in the
+ * This function checks whether the op_value is same as in the
* gsyncd.conf file.
*
* RETURN VALUE:
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 8fdf9a3819a..d90e82c495c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3871,7 +3871,7 @@ set_probe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
break;
case GF_PROBE_VOLUME_CONFLICT:
- snprintf (errstr, len, "Atleast one volume on "
+ snprintf (errstr, len, "At least one volume on "
"%s conflicts with existing volumes "
"in the cluster", hostname);
break;
@@ -4086,7 +4086,7 @@ glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
goto out;
}
- /* Reset ret to 0, needed to prevent failure incase no peers exist */
+ /* Reset ret to 0, needed to prevent failure in case no peers exist */
ret = 0;
rcu_read_lock ();
if (!cds_list_empty (&priv->peers)) {
@@ -6552,7 +6552,7 @@ struct rpcsvc_program gd_svc_cli_prog = {
/**
* This set of RPC progs are deemed to be trusted. Most of the actors support
* read only queries, the only exception being MOUNT/UMOUNT which is required
- * by geo-replication to supprt unprivileged master -> slave sessions.
+ * by geo-replication to support unprivileged master -> slave sessions.
*/
rpcsvc_actor_t gd_svc_cli_trusted_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA},
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index ab69c78b0f0..cc9c03f5683 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -280,7 +280,7 @@ glusterd_acquire_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
}
if (count == locked_count) {
- /* If all locking ops went successfuly, return as success */
+ /* If all locking ops went successfully, return as success */
ret = 0;
goto out;
}
@@ -528,7 +528,7 @@ glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid, uint32_t *op_errno)
}
if (locked_count == GF_MAX_LOCKING_ENTITIES) {
- /* If all locking ops went successfuly, return as success */
+ /* If all locking ops went successfully, return as success */
ret = 0;
goto out;
}
@@ -889,7 +889,7 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
type, name);
ret = 0;
- /* Release owner refernce which was held during lock */
+ /* Release owner reference which was held during lock */
if (mgmt_lock_timer->timer) {
ret = -1;
mgmt_lock_timer_xl = mgmt_lock_timer->xl;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index a19d98a51c2..63220ffd133 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -5800,7 +5800,7 @@ glusterd_op_txn_complete (uuid_t *txn_id)
/* Based on the op-version, we release the cluster or mgmt_v3 lock */
if (priv->op_version < GD_OP_VERSION_3_6_0) {
ret = glusterd_unlock (MY_UUID);
- /* unlock cant/shouldnt fail here!! */
+ /* unlock can't/shouldn't fail here!! */
if (ret)
gf_msg (this->name, GF_LOG_CRITICAL, 0,
GD_MSG_GLUSTERD_UNLOCK_FAIL,
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 146090924a2..9066e038dd7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -264,7 +264,7 @@ glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr,
if (dict_get_str (this->options, "transport.socket.bind-address",
&volfileserver) == 0) {
/*In the case of running multiple glusterds on a single machine,
- *we should ensure that log file and unix socket file shouls be
+ *we should ensure that log file and unix socket file should be
*unique in given cluster */
GLUSTERD_GET_DEFRAG_SOCK_FILE_OLD (sockfile, volinfo,
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index 249d99ea3dd..1bb1df049f9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -63,7 +63,7 @@ glusterd_snapobject_delete (glusterd_snap_t *snap)
/*
* This function is to be called only from glusterd_peer_detach_cleanup()
- * as this continues to delete snaps inspite of faiure while deleting
+ * as this continues to delete snaps in spite of faiure while deleting
* one, as we don't want to fail peer_detach in such a case.
*/
int
@@ -447,7 +447,7 @@ out:
/* Exports a bricks snapshot details only if required
*
- * The details will be exported only if the cluster op-version is greather than
+ * The details will be exported only if the cluster op-version is greater than
* 4, ie. snapshot is supported in the cluster
*/
int
@@ -1473,7 +1473,7 @@ out:
return missed_delete;
}
-/* Genrate and store snap volfiles for imported snap object */
+/* Generate and store snap volfiles for imported snap object */
int32_t
glusterd_gen_snap_volfiles (glusterd_volinfo_t *snap_vol, char *peer_snap_name)
{
@@ -3087,7 +3087,7 @@ glusterd_snap_common_quorum_calculate (glusterd_volinfo_t *volinfo,
default.
AFR does this:
if quorum type is "auto":
- - for odd numner of bricks (n), n/2 + 1
+ - for odd number of bricks (n), n/2 + 1
bricks should be present
- for even number of bricks n, n/2 bricks
should be present along with the 1st
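
[Editor's note: the "auto" quorum rule spelled out in the comment above can be
illustrated with a few lines of C. This is a minimal sketch of the rule as the
comment states it; the function name and parameters are invented for the
example and are not taken from the AFR code.]

#include <stdbool.h>

/* Sketch of AFR "auto" quorum: for an odd brick count n, at least n/2 + 1
 * bricks must be up; for an even count, n/2 bricks suffice only if the
 * first brick is among them. */
static bool
afr_auto_quorum_met (int brick_count, int up_count, bool first_brick_up)
{
        if (brick_count % 2)
                return up_count >= (brick_count / 2 + 1);

        if (up_count > brick_count / 2)
                return true;

        return (up_count == brick_count / 2) && first_brick_up;
}

For example, with 3 bricks the sketch requires 2 of them to be up; with 4
bricks it accepts exactly 2 up only when the first brick is one of them.
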
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 3d984fa3f57..ef3ca7014b1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -75,7 +75,7 @@ struct snap_create_args_ {
int32_t brickorder;
};
-/* This structure is used to store unsupported options and thier values
+/* This structure is used to store unsupported options and their values
* for snapshotted volume.
*/
struct gd_snap_unsupported_opt_t {
@@ -187,7 +187,7 @@ glusterd_find_missed_snap (dict_t *rsp_dict, glusterd_volinfo_t *vol,
rcu_read_lock ();
cds_list_for_each_entry_rcu (peerinfo, peers, uuid_list) {
if (gf_uuid_compare (peerinfo->uuid, brickinfo->uuid)) {
- /* If the brick doesnt belong to this peer */
+ /* If the brick doesn't belong to this peer */
continue;
}
@@ -475,7 +475,7 @@ out:
/* Third argument of scandir(used in glusterd_copy_geo_rep_session_files)
- * is filter function. As we dont want "." and ".." files present in the
+ * is filter function. As we don't want "." and ".." files present in the
* directory, we are excliding these 2 files.
* "file_select" function here does the job of filtering.
*/
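
[Editor's note: a scandir(3) filter of the kind this comment describes looks
like the sketch below. The name file_select matches the comment, but the body
here is only an illustration, not the glusterd source.]

#include <dirent.h>
#include <string.h>

/* Reject the "." and ".." directory entries, accept everything else. */
static int
file_select (const struct dirent *entry)
{
        if (!strcmp (entry->d_name, ".") || !strcmp (entry->d_name, ".."))
                return 0;       /* filtered out */

        return 1;               /* kept by scandir() */
}
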
@@ -5679,7 +5679,7 @@ glusterd_snapshot_activate_deactivate_prevalidate (dict_t *dict,
goto out;
}
- /*TODO: When multiple snapvolume are involved a cummulative
+ /*TODO: When multiple snapvolume are involved a cumulative
* logic is required to tell whether is snapshot is
* started/partially started/stopped*/
if (is_op_activate) {
@@ -7681,7 +7681,7 @@ glusterd_get_single_brick_status (char **op_errstr, dict_t *rsp_dict,
if (ret < 0) {
goto out;
}
- /* While getting snap status we should show relevent information
+ /* While getting snap status we should show relevant information
* for deactivated snaps.
*/
if (snap_volinfo->status == GLUSTERD_STATUS_STOPPED) {
@@ -8415,7 +8415,7 @@ glusterd_snapshot_create_postvalidate (dict_t *dict, int32_t op_ret,
* send EVENT_SNAPSHOT_ACTIVATED event. *
* *
* Also check, if hard limit and soft limit is reached in case *
- * of successfuly creating the snapshot, and generate the event *
+ * of successfully creating the snapshot, and generate the event *
*/
if (is_origin_glusterd (dict) == _gf_true) {
snap_activate = dict_get_str_boolean (priv->opts,
@@ -9671,7 +9671,7 @@ glusterd_handle_snapshot_fn (rpcsvc_request_t *req)
break;
default:
gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_COMMAND_NOT_FOUND, "Unkown snapshot request "
+ GD_MSG_COMMAND_NOT_FOUND, "Unknown snapshot request "
"type (%d)", type);
ret = -1; /* Failure */
}
@@ -10112,7 +10112,7 @@ gd_restore_snap_volume (dict_t *dict, dict_t *rsp_dict,
goto out;
}
- /* Snap volume must be stoped before performing the
+ /* Snap volume must be stopped before performing the
* restore operation.
*/
ret = glusterd_stop_volume (snap_vol);
diff --git a/xlators/mgmt/glusterd/src/glusterd-tier.c b/xlators/mgmt/glusterd/src/glusterd-tier.c
index 446cb33ff11..0823a83449d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-tier.c
+++ b/xlators/mgmt/glusterd/src/glusterd-tier.c
@@ -744,7 +744,7 @@ glusterd_op_tier_start_stop (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
switch (cmd) {
case GF_DEFRAG_CMD_START_TIER:
- /* we check if its running and skip so that we dont get a
+ /* we check if its running and skip so that we don't get a
* failure during force start
*/
ret = dict_get_int32 (dict, "force", &is_force);
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 55b2e735dc3..6be40ef8c02 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -1920,7 +1920,7 @@ glusterd_set_brick_socket_filepath (glusterd_volinfo_t *volinfo,
glusterd_set_socket_filepath (sock_filepath, sockpath, len);
}
-/* connection happens only if it is not aleady connected,
+/* connection happens only if it is not already connected,
* reconnections are taken care by rpc-layer
*/
int32_t
@@ -4465,7 +4465,7 @@ glusterd_volinfo_copy_brickinfo (glusterd_volinfo_t *old_volinfo,
if (!realpath (new_brickinfo->path, abspath)) {
/* Here an ENOENT should also be a
* failure as the brick is expected to
- * be in existance
+ * be in existence
*/
gf_msg (this->name, GF_LOG_CRITICAL,
errno,
@@ -4573,7 +4573,7 @@ glusterd_delete_stale_volume (glusterd_volinfo_t *stale_volinfo,
/* If stale volume is in started state, stop the stale bricks if the new
* volume is started else, stop all bricks.
- * We dont want brick_rpc_notify to access already deleted brickinfo,
+ * We don't want brick_rpc_notify to access already deleted brickinfo,
* so disconnect all bricks from stale_volinfo (unconditionally), since
* they are being deleted subsequently.
*/
@@ -12351,7 +12351,7 @@ op_version_check (xlator_t *this, int min_op_version, char *msg, int msglen)
if (priv->op_version < min_op_version) {
snprintf (msg, msglen, "One or more nodes do not support "
"the required op-version. Cluster op-version must "
- "atleast be %d.", min_op_version);
+ "at least be %d.", min_op_version);
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_UNSUPPORTED_VERSION, "%s", msg);
ret = -1;
@@ -12781,7 +12781,7 @@ glusterd_launch_synctask (synctask_fn_t fn, void *opaque)
* @option - option to be set to default. If NULL, all possible options will be
* set to default
*
- * Returns 0 on sucess and -1 on failure. If @option is given, but doesn't match
+ * Returns 0 on success and -1 on failure. If @option is given, but doesn't match
* any of the options that could be set, it is a success.
*/
/*
@@ -13397,7 +13397,7 @@ glusterd_get_volopt_content (dict_t * ctx, gf_boolean_t xml_out)
gf_msg_debug ("glusterd", 0, "Failed to "
"get %s key from volume option entry",
vme->key);
- goto out; /*Some error while geting key*/
+ goto out; /*Some error while getting key*/
}
ret = xlator_volopt_dynload (vme->voltype,
@@ -14322,7 +14322,7 @@ glusterd_get_dst_brick_info (char **dst_brick, char *volname, char **op_errstr,
/*
* IPv4 address contains '.' and ipv6 addresses contains ':'
- * So finding the last occurance of ':' to
+ * So finding the last occurrence of ':' to
* mark the start of brick path
*/
c = strrchr(*dup_dstbrick, ':');
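
[Editor's note: the host/brick-path split described in this comment can be
demonstrated with a small stand-alone sketch. The helper below is illustrative
only -- its name and the sample address are not taken from glusterd -- but it
uses the same idea: because an IPv6 address itself contains ':', only the last
':' can mark the start of the brick path.]

#include <stdio.h>
#include <string.h>

/* Split "host:/brick/path" in place at the last ':' so that IPv6 hosts
 * such as 2001:db8::1 are handled correctly. The argument must be a
 * writable string. */
static void
split_dst_brick (char *dst_brick)
{
        char *c = strrchr (dst_brick, ':');

        if (!c)
                return;         /* no brick path present */

        *c = '\0';
        printf ("host = %s, brick path = %s\n", dst_brick, c + 1);
}

Calling the sketch on a writable copy of "2001:db8::1:/bricks/b1" prints
host = 2001:db8::1 and brick path = /bricks/b1.
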
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 76266e1e194..fd256083722 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -1119,7 +1119,7 @@ get_vol_transport_type (glusterd_volinfo_t *volinfo, char *tt)
transport_type_to_str (volinfo->transport_type, tt);
}
-/* If no value has specfied for tcp,rdma volume from cli
+/* If no value has specified for tcp,rdma volume from cli
* use tcp as default value.Otherwise, use transport type
* mentioned in volinfo
*/
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 3973a0da78a..ec4c699c471 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -2705,7 +2705,7 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
if (conf->op_version <= GD_OP_VERSION_3_7_6) {
/*
* Starting tier daemon on originator node will fail if
- * atleast one of the peer host brick for the volume.
+ * at least one of the peer host brick for the volume.
* Because The bricks in the peer haven't started when you
* commit on originator node.
* Please upgrade to version greater than GD_OP_VERSION_3_7_6
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 26a076905e9..c5a4b267692 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -682,7 +682,7 @@ validate_uss_dir (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
goto out;
} else if (i < 2) {
snprintf (errstr, sizeof (errstr), "value of %s too short, "
- "expects atleast two characters", key);
+ "expects at least two characters", key);
goto out;
}
@@ -1224,7 +1224,7 @@ out:
* Fifth field is <doctype>, which decides if the option is public and available
* in "set help" or not. "NO_DOC" entries are not part of the public interface
* and are subject to change at any time. This also decides if an option is
- * global (apllies to all volumes) or normal (applies to only specified volume).
+ * global (applies to all volumes) or normal (applies to only specified volume).
*
* Sixth field is <flags>.
*
@@ -1233,7 +1233,7 @@ out:
* Eight field is description of option: If NULL, tried to fetch from
* translator code's xlator_options table.
*
- * Nineth field is validation function: If NULL, xlator's option specific
+ * Ninth field is validation function: If NULL, xlator's option specific
* validation will be tried, otherwise tried at glusterd code itself.
*
* There are two type of entries: basic and special.
@@ -1963,7 +1963,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.option = "xattr-cache-list",
.op_version = GD_OP_VERSION_4_0_0,
.flags = VOLOPT_FLAG_CLIENT_OPT,
- .description = "A comma separeted list of xattrs that shall be "
+ .description = "A comma separated list of xattrs that shall be "
"cached by md-cache. The only wildcard allowed is '*'"
},
{ .key = "performance.nl-cache-pass-through",
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index 7dc45129312..0714714d33e 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -2062,9 +2062,9 @@ fini (xlator_t *this)
#if 0
/* Running threads might be using these resourses, we have to cancel/stop
- * running threads before deallocating the memeory, but we don't have
+ * running threads before deallocating the memory, but we don't have
* control over the running threads to do pthread_cancel().
- * So memeory freeing handover to kernel.
+ * So memory freeing handover to kernel.
*/
/*TODO: cancel/stop the running threads*/