Diffstat (limited to 'xlators/cluster')
-rw-r--r--  xlators/cluster/afr/src/afr-common.c         14
-rw-r--r--  xlators/cluster/afr/src/afr-inode-read.c      2
-rw-r--r--  xlators/cluster/afr/src/afr-inode-write.c     2
-rw-r--r--  xlators/cluster/afr/src/afr-lk-common.c       4
-rw-r--r--  xlators/cluster/afr/src/afr.c                 4
-rw-r--r--  xlators/cluster/dht/src/dht-common.c         16
-rw-r--r--  xlators/cluster/dht/src/dht-diskusage.c       2
-rw-r--r--  xlators/cluster/dht/src/dht-helper.c          4
-rw-r--r--  xlators/cluster/dht/src/dht-rebalance.c      12
-rw-r--r--  xlators/cluster/dht/src/dht-selfheal.c        4
-rw-r--r--  xlators/cluster/dht/src/tier.c               10
-rw-r--r--  xlators/cluster/dht/src/tier.h                2
-rw-r--r--  xlators/cluster/ec/src/ec-code.c              2
-rw-r--r--  xlators/cluster/ec/src/ec-common.c            2
-rw-r--r--  xlators/cluster/ec/src/ec-common.h            2
-rw-r--r--  xlators/cluster/ec/src/ec-heal.c              2
-rw-r--r--  xlators/cluster/ec/src/ec-helpers.h           2
-rw-r--r--  xlators/cluster/ec/src/ec-inode-write.c       2
-rw-r--r--  xlators/cluster/ec/src/ec-locks.c             2
-rw-r--r--  xlators/cluster/ec/src/ec-types.h             2
-rw-r--r--  xlators/cluster/ec/src/ec.c                   2
-rw-r--r--  xlators/cluster/stripe/src/stripe-helpers.c   2
-rw-r--r--  xlators/cluster/stripe/src/stripe.c           4
23 files changed, 50 insertions, 50 deletions
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index e2821f1b295..e60d5315dbe 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -912,7 +912,7 @@ afr_set_split_brain_choice (int ret, call_frame_t *frame, void *opaque)
/* If timer cancel failed here it means that the
* previous cbk will be executed which will set
* spb_choice to -1. So we can consider the
- * 'valid to -1' case to be a sucess
+ * 'valid to -1' case to be a success
* (i.e. ret = 0) and goto unlock.
*/
goto unlock;
@@ -4722,7 +4722,7 @@ afr_ipc_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
int child_index = (long)cookie;
int call_count = 0;
gf_boolean_t failed = _gf_false;
- gf_boolean_t succeded = _gf_false;
+ gf_boolean_t succeeded = _gf_false;
int i = 0;
afr_private_t *priv = NULL;
@@ -4742,7 +4742,7 @@ afr_ipc_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
* return error else return success unless all the subvolumes
* failed.
* TODO: In case of failure, we need to unregister the xattrs
- * from the other subvolumes where it succeded (once upcall
+ * from the other subvolumes where it succeeded (once upcall
* fixes the Bz-1371622)*/
for (i = 0; i < priv->child_count; i++) {
if (!local->replies[i].valid)
@@ -4762,7 +4762,7 @@ afr_ipc_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
break;
}
if (local->replies[i].op_ret == 0) {
- succeded = _gf_true;
+ succeeded = _gf_true;
local->op_ret = 0;
local->op_errno = 0;
if (!local->xdata_rsp && local->replies[i].xdata) {
@@ -4772,7 +4772,7 @@ afr_ipc_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
}
}
- if (!succeded && !failed) {
+ if (!succeeded && !failed) {
local->op_ret = -1;
local->op_errno = ENOTCONN;
}
@@ -5314,7 +5314,7 @@ __afr_handle_child_down_event (xlator_t *this, xlator_t *child_xlator,
if (down_children == priv->child_count) {
gf_msg (this->name, GF_LOG_ERROR, 0, AFR_MSG_SUBVOLS_DOWN,
"All subvolumes are down. Going "
- "offline until atleast one of them "
+ "offline until at least one of them "
"comes back up.");
gf_event (EVENT_AFR_SUBVOLS_DOWN, "subvol=%s", this->name);
} else {
@@ -5364,7 +5364,7 @@ afr_notify (xlator_t *this, int32_t event,
priv->did_discovery = _gf_false;
- /* parent xlators dont need to know about every child_up, child_down
+ /* parent xlators don't need to know about every child_up, child_down
* because of afr ha. If all subvolumes go down, child_down has
* to be triggered. In that state when 1 subvolume comes up child_up
* needs to be triggered. dht optimizes revalidate lookup by sending
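
A minimal sketch of the reply-aggregation rule described in the afr_ipc_cbk comment above (fail on any error other than ENOTCONN, succeed if at least one subvolume succeeded, otherwise fall back to ENOTCONN). It uses a hypothetical struct reply rather than the real afr_local_t->replies:

/* Hedged sketch of the reply-aggregation rule from afr_ipc_cbk; the
 * types below are stand-ins, not the real AFR structures. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct reply {
    bool valid;    /* did this subvolume answer at all? */
    int  op_ret;   /* 0 on success, -1 on failure */
    int  op_errno; /* errno when op_ret < 0 */
};

/* Returns 0 on success; otherwise returns -1 and sets *op_errno. */
int
aggregate_replies(const struct reply *replies, int count, int *op_errno)
{
    bool failed = false;
    bool succeeded = false;

    for (int i = 0; i < count; i++) {
        if (!replies[i].valid)
            continue;
        if (replies[i].op_ret < 0 && replies[i].op_errno != ENOTCONN) {
            /* a "real" error wins over everything else */
            *op_errno = replies[i].op_errno;
            failed = true;
            break;
        }
        if (replies[i].op_ret == 0)
            succeeded = true;
    }

    if (failed)
        return -1;
    if (!succeeded) {
        /* nobody failed hard, but nobody succeeded either */
        *op_errno = ENOTCONN;
        return -1;
    }
    return 0;
}

int
main(void)
{
    struct reply replies[] = {
        { true, -1, ENOTCONN },
        { true,  0, 0 },
    };
    int op_errno = 0;
    int ret = aggregate_replies(replies, 2, &op_errno);

    printf("ret=%d errno=%d\n", ret, op_errno);
    return 0;
}
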
diff --git a/xlators/cluster/afr/src/afr-inode-read.c b/xlators/cluster/afr/src/afr-inode-read.c
index 8d3c823043f..e8192a2b753 100644
--- a/xlators/cluster/afr/src/afr-inode-read.c
+++ b/xlators/cluster/afr/src/afr-inode-read.c
@@ -735,7 +735,7 @@ afr_getxattr_node_uuid_cbk (call_frame_t *frame, void *cookie,
/**
* _current_ becomes _next_
- * If done with all childs and yet no success; give up !
+ * If done with all children and yet no success; give up !
*/
curr_call_child = (int) ((long)cookie);
if (++curr_call_child == priv->child_count)
diff --git a/xlators/cluster/afr/src/afr-inode-write.c b/xlators/cluster/afr/src/afr-inode-write.c
index 9a9ef2e90a5..755e928ef62 100644
--- a/xlators/cluster/afr/src/afr-inode-write.c
+++ b/xlators/cluster/afr/src/afr-inode-write.c
@@ -102,7 +102,7 @@ __afr_inode_write_finalize (call_frame_t *frame, xlator_t *this)
below is important.
- Highest precedence: largest op_ret
- - Next precendence: if all op_rets are equal, read subvol
+ - Next precedence: if all op_rets are equal, read subvol
- Least precedence: any succeeded subvol
*/
if ((local->op_ret < local->replies[i].op_ret) ||
diff --git a/xlators/cluster/afr/src/afr-lk-common.c b/xlators/cluster/afr/src/afr-lk-common.c
index dff6644eb96..d7ff2b01a06 100644
--- a/xlators/cluster/afr/src/afr-lk-common.c
+++ b/xlators/cluster/afr/src/afr-lk-common.c
@@ -601,7 +601,7 @@ is_blocking_locks_count_sufficient (call_frame_t *frame, xlator_t *this)
"gfid:%s.", uuid_utoa (local->inode->gfid));
return _gf_false;
} else {
- /*inodelk succeded on atleast one child. */
+ /*inodelk succeeded on at least one child. */
return _gf_true;
}
@@ -611,7 +611,7 @@ is_blocking_locks_count_sufficient (call_frame_t *frame, xlator_t *this)
return _gf_false;
}
/* For FOPS that take multiple sets of locks (mkdir, rename),
- * there must be atleast one brick on which the locks from
+ * there must be at least one brick on which the locks from
* all lock sets were successful. */
for (child = 0; child < priv->child_count; child++) {
ret = _gf_true;
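
The corrected comment above states the rule for fops that take multiple lock sets: at least one brick must hold the locks from every set. A self-contained sketch of that check, with a hypothetical flattened locked[] matrix standing in for the real AFR lock bookkeeping:

#include <stdbool.h>

/* locked[set * bricks + brick] is true when lock set 'set' succeeded
 * on brick 'brick'. Hypothetical layout, not the real afr_internal_lock_t. */
bool
has_brick_with_all_lock_sets(const bool *locked, int lock_sets, int bricks)
{
    for (int brick = 0; brick < bricks; brick++) {
        bool all_sets_locked = true;

        for (int set = 0; set < lock_sets; set++) {
            if (!locked[set * bricks + brick]) {
                all_sets_locked = false;
                break;
            }
        }
        if (all_sets_locked)
            return true;  /* this brick holds locks from every set */
    }
    return false;
}
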
diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
index 0d6773c96e8..0e86e33d03b 100644
--- a/xlators/cluster/afr/src/afr.c
+++ b/xlators/cluster/afr/src/afr.c
@@ -309,7 +309,7 @@ afr_pending_xattrs_init (afr_private_t *priv, xlator_t *this)
child_count = priv->child_count;
if (priv->thin_arbiter_count) {
/* priv->pending_key[THIN_ARBITER_BRICK_INDEX] is used as the
- * name of the thin arbiter file for persistance across add/
+ * name of the thin arbiter file for persistence across add/
* removal of DHT subvols.*/
child_count++;
}
@@ -621,7 +621,7 @@ fini (xlator_t *this)
UNLOCK (&priv->lock);
this->private = NULL;
afr_priv_destroy (priv);
- //if (this->itable);//I dont see any destroy func
+ //if (this->itable);//I don't see any destroy func
return 0;
}
diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
index 45218d23482..1d6564cb962 100644
--- a/xlators/cluster/dht/src/dht-common.c
+++ b/xlators/cluster/dht/src/dht-common.c
@@ -1967,7 +1967,7 @@ cont:
DHT_STRIP_PHASE1_FLAGS (&local->stbuf);
dht_set_fixed_dir_stat (&local->postparent);
- /* local->stbuf is udpated only from subvols which have a layout
+ /* local->stbuf is updated only from subvols which have a layout
* The reason is to avoid choosing attr heal source from newly
* added bricks. In case e.g we have only one subvol and for
* some reason layout is not present on it, then local->stbuf
@@ -2454,7 +2454,7 @@ dht_lookup_everywhere_done (call_frame_t *frame, xlator_t *this)
*
* Performing deletion of stale link file when
* setting key in dict fails, may cause the data
- * loss becase of the above mentioned race.
+ * loss because of the above mentioned race.
*/
@@ -3255,9 +3255,9 @@ dht_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
"Entry %s missing on subvol %s",
loc->path, prev->name);
- /* lookup-optimize supercedes lookup-unhashed settings,
+ /* lookup-optimize supersedes lookup-unhashed settings,
* - so if it is set, do not process search_unhashed
- * - except, in the case of rebalance deamon, we want to
+ * - except, in the case of rebalance daemon, we want to
* force the lookup_everywhere behavior */
if (!conf->defrag && conf->lookup_optimize && loc->parent) {
ret = dht_inode_ctx_layout_get (loc->parent, this,
@@ -4429,7 +4429,7 @@ dht_vgetxattr_fill_and_set (dht_local_t *local, dict_t **dict, xlator_t *this,
*
* For node-uuid we just don't have all the pretty formatting,
* but since this is a generic routine for pathinfo & node-uuid
- * we dont have conditional space allocation and try to be
+ * we don't have conditional space allocation and try to be
* generic
*/
local->alloc_len += (2 * strlen (this->name))
@@ -6122,7 +6122,7 @@ dht_setxattr (call_frame_t *frame, xlator_t *this,
* promotions and demotions are multithreaded
* so the original frame from gf_defrag_start()
* is not carried. A new frame will be created when
- * we do syncop_setxattr(). This doesnot have the
+ * we do syncop_setxattr(). This does not have the
* frame->root->pid of the original frame. So we pass
* this dic key-value when we do syncop_setxattr() to do
* data migration and set the frame->root->pid to
@@ -8417,7 +8417,7 @@ dht_link_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto out;
}
- /* Update parent on success, even if P1/2 checks are positve.
+ /* Update parent on success, even if P1/2 checks are positive.
* The second call on success will further update the parent */
if (local->loc.parent) {
dht_inode_ctx_time_update (local->loc.parent, this,
@@ -9933,7 +9933,7 @@ dht_rmdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto unlock;
}
- /* Track if rmdir succeeded on atleast one subvol*/
+ /* Track if rmdir succeeded on at least one subvol*/
local->fop_succeeded = 1;
dht_iatt_merge (this, &local->preparent, preparent);
dht_iatt_merge (this, &local->postparent, postparent);
diff --git a/xlators/cluster/dht/src/dht-diskusage.c b/xlators/cluster/dht/src/dht-diskusage.c
index c2b9b2e37c4..f109615e52c 100644
--- a/xlators/cluster/dht/src/dht-diskusage.c
+++ b/xlators/cluster/dht/src/dht-diskusage.c
@@ -470,7 +470,7 @@ dht_subvol_with_free_space_inodes(xlator_t *this, xlator_t *subvol, xlator_t *ig
}
-/* Get subvol which has atleast one inode and maximum space */
+/* Get subvol which has at least one inode and maximum space */
xlator_t *
dht_subvol_maxspace_nonzeroinode (xlator_t *this, xlator_t *subvol,
dht_layout_t *layout)
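
For the dht_subvol_maxspace_nonzeroinode() comment fixed above, a rough standalone sketch of the selection rule (most free space among subvolumes that still have at least one free inode), using a hypothetical struct subvol_usage instead of DHT's real statfs statistics:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical per-subvolume usage record. */
struct subvol_usage {
    const char *name;
    uint64_t    free_space;
    uint64_t    free_inodes;
};

/* Pick the subvolume with the most free space among those that still
 * have at least one free inode; NULL when none qualifies. */
const struct subvol_usage *
max_space_with_nonzero_inodes(const struct subvol_usage *subvols, size_t n)
{
    const struct subvol_usage *best = NULL;

    for (size_t i = 0; i < n; i++) {
        if (subvols[i].free_inodes == 0)
            continue;  /* cannot hold a new inode, skip it */
        if (best == NULL || subvols[i].free_space > best->free_space)
            best = &subvols[i];
    }
    return best;
}
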
diff --git a/xlators/cluster/dht/src/dht-helper.c b/xlators/cluster/dht/src/dht-helper.c
index 018389d31eb..561d1199e10 100644
--- a/xlators/cluster/dht/src/dht-helper.c
+++ b/xlators/cluster/dht/src/dht-helper.c
@@ -1322,7 +1322,7 @@ dht_migration_complete_check_task (void *data)
inode = (!local->fd) ? local->loc.inode : local->fd->inode;
/* getxattr on cached_subvol for 'linkto' value. Do path based getxattr
- * as root:root. If a fd is already open, access check wont be done*/
+ * as root:root. If a fd is already open, access check won't be done*/
if (!local->loc.inode) {
ret = syncop_fgetxattr (src_node, local->fd, &dict,
@@ -1600,7 +1600,7 @@ dht_rebalance_inprogress_task (void *data)
inode = (!local->fd) ? local->loc.inode : local->fd->inode;
/* getxattr on cached_subvol for 'linkto' value. Do path based getxattr
- * as root:root. If a fd is already open, access check wont be done*/
+ * as root:root. If a fd is already open, access check won't be done*/
if (local->loc.inode) {
SYNCTASK_SETID (0, 0);
ret = syncop_getxattr (src_node, &local->loc, &dict,
diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
index 3b986be97e0..1f1beb8dca3 100644
--- a/xlators/cluster/dht/src/dht-rebalance.c
+++ b/xlators/cluster/dht/src/dht-rebalance.c
@@ -815,7 +815,7 @@ __dht_rebalance_create_dst_file (xlator_t *this, xlator_t *to, xlator_t *from,
*server (posix_layer) and binding it in server (incrementing fd count),
*so if in that time-gap, if other process sends unlink considering it
*as a linkto file, because inode->fd count will be 0, so file will be
- *unlinked at the backend. And because furthur operations are performed
+ *unlinked at the backend. And because further operations are performed
*on fd, so though migration will be done but will end with no file
*at the backend.
*/
@@ -1017,9 +1017,9 @@ __dht_check_free_space (xlator_t *this, xlator_t *to, xlator_t *from,
During rebalance `migrate-data` - Destination subvol experiences
a `reduction` in 'blocks' of free space, at the same time source
subvol gains certain 'blocks' of free space. A valid check is
- necessary here to avoid errorneous move to destination where
+ necessary here to avoid erroneous move to destination where
the space could be scantily available.
- With heterogenous brick support, an actual space comparison could
+ With heterogeneous brick support, an actual space comparison could
prevent any files being migrated to newly added bricks if they are
smaller then the free space available on the existing bricks.
*/
@@ -1119,7 +1119,7 @@ find_new_subvol:
if ((!(*new_subvol)) || (*new_subvol == from)) {
gf_msg (this->name, GF_LOG_WARNING, 0,
DHT_MSG_SUBVOL_INSUFF_SPACE, "Could not find any subvol"
- " with space accomodating the file - %s. Consider "
+ " with space accommodating the file - %s. Consider "
"adding bricks", loc->path);
*target_changed = _gf_false;
@@ -3363,7 +3363,7 @@ gf_defrag_get_entry (xlator_t *this, int i, struct dht_container **container,
tmp_container->df_entry->dict =
dict_ref (df_entry->dict);
- /*Build Container Structue >> END*/
+ /*Build Container Structure >> END*/
ret = 0;
goto out;
@@ -3670,7 +3670,7 @@ gf_defrag_settle_hash (xlator_t *this, gf_defrag_info_t *defrag,
if (conf->local_subvols_cnt == 0 || !conf->lookup_optimize) {
/* Commit hash updates are only done on local subvolumes and
- * only when lookup optmization is needed (for older client
+ * only when lookup optimization is needed (for older client
* support)
*/
return 0;
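
The free-space comment corrected above argues that a raw block comparison would starve small, newly added bricks, so the check has to reason about proportions. A simplified illustration of that idea, not the exact __dht_check_free_space() logic, using a hypothetical struct space_info:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical statfs-like summary of one brick. */
struct space_info {
    uint64_t total_blocks;
    uint64_t free_blocks;
};

/* Compare free space as a fraction of each brick's size after the move,
 * instead of raw block counts, so a small newly added brick is not
 * rejected merely for being small. */
bool
move_leaves_dst_no_tighter(const struct space_info *src,
                           const struct space_info *dst,
                           uint64_t file_blocks)
{
    if (src->total_blocks == 0 || dst->total_blocks == 0)
        return false;
    if (dst->free_blocks <= file_blocks)
        return false;  /* destination cannot hold the file at all */

    double src_free_after = (double)(src->free_blocks + file_blocks) /
                            (double)src->total_blocks;
    double dst_free_after = (double)(dst->free_blocks - file_blocks) /
                            (double)dst->total_blocks;

    /* Migrate only when the destination is not left proportionally
     * tighter on space than the source. */
    return dst_free_after >= src_free_after;
}
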
diff --git a/xlators/cluster/dht/src/dht-selfheal.c b/xlators/cluster/dht/src/dht-selfheal.c
index 6c526bd6990..5d5c8e86ddf 100644
--- a/xlators/cluster/dht/src/dht-selfheal.c
+++ b/xlators/cluster/dht/src/dht-selfheal.c
@@ -1462,7 +1462,7 @@ dht_selfheal_dir_mkdir_lock_cbk (call_frame_t *frame, void *cookie,
if (op_ret < 0) {
/* We get this error when the directory entry was not created
- * on a newky attatched tier subvol. Hence proceed and do mkdir
+ * on a newky attached tier subvol. Hence proceed and do mkdir
* on the tier subvol.
*/
if (op_errno == EINVAL) {
@@ -2047,7 +2047,7 @@ dht_selfheal_dir_getafix (call_frame_t *frame, loc_t *loc,
overlaps = local->selfheal.overlaps_cnt;
if (holes || overlaps) {
- /* If the layout has anomolies which would change the hash
+ /* If the layout has anomalies which would change the hash
* ranges, then we need to reset the commit_hash for this
* directory, as the layout would change and things may not
* be in place as expected */
diff --git a/xlators/cluster/dht/src/tier.c b/xlators/cluster/dht/src/tier.c
index 45a19d96243..fd57ddfaaab 100644
--- a/xlators/cluster/dht/src/tier.c
+++ b/xlators/cluster/dht/src/tier.c
@@ -564,7 +564,7 @@ tier_set_migrate_data (dict_t *migrate_data)
* promotions and demotions are multithreaded
* so the original frame from gf_defrag_start()
* is not carried. A new frame will be created when
- * we do syncop_setxattr(). This doesnot have the
+ * we do syncop_setxattr(). This does not have the
* frame->root->pid of the original frame. So we pass
* this dic key-value when we do syncop_setxattr() to do
* data migration and set the frame->root->pid to
@@ -964,7 +964,7 @@ tier_migrate_using_query_file (void *_args)
* per_file_status and per_link_status
* 0 : success
* -1 : failure
- * 1 : ignore the status and dont count for migration
+ * 1 : ignore the status and don't count for migration
* */
int per_file_status = 0;
int per_link_status = 0;
@@ -2444,7 +2444,7 @@ static void
while (1) {
/*
- * Check if a graph switch occured. If so, stop migration
+ * Check if a graph switch occurred. If so, stop migration
* thread. It will need to be restarted manually.
*/
any = THIS->ctx->active->first;
@@ -2489,8 +2489,8 @@ static void
/* To have proper synchronization amongst all
* brick holding nodes, so that promotion and demotions
- * start atomicly w.r.t promotion/demotion frequency
- * period, all nodes should have thier system time
+ * start atomically w.r.t promotion/demotion frequency
+ * period, all nodes should have their system time
* in-sync with each other either manually set or
* using a NTP server*/
ret = gettimeofday (&current_time, NULL);
diff --git a/xlators/cluster/dht/src/tier.h b/xlators/cluster/dht/src/tier.h
index 764860e6884..ce08fb5a669 100644
--- a/xlators/cluster/dht/src/tier.h
+++ b/xlators/cluster/dht/src/tier.h
@@ -13,7 +13,7 @@
/******************************************************************************/
-/* This is from dht-rebalancer.c as we dont have dht-rebalancer.h */
+/* This is from dht-rebalancer.c as we don't have dht-rebalancer.h */
#include "dht-common.h"
#include "xlator.h"
#include <signal.h>
diff --git a/xlators/cluster/ec/src/ec-code.c b/xlators/cluster/ec/src/ec-code.c
index 44957dd788f..2cdd5bebe11 100644
--- a/xlators/cluster/ec/src/ec-code.c
+++ b/xlators/cluster/ec/src/ec-code.c
@@ -479,7 +479,7 @@ ec_code_space_create(ec_code_t *code, size_t size)
done_close:
/* If everything has succeeded, we already have the memory areas
* mapped. We don't need the file descriptor anymore because the
- * backend storage will be there until the mmaped regions are
+ * backend storage will be there until the mmap()'d regions are
* unmapped. */
sys_close(fd);
done:
diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
index e3e34811395..4776b697a8b 100644
--- a/xlators/cluster/ec/src/ec-common.c
+++ b/xlators/cluster/ec/src/ec-common.c
@@ -1631,7 +1631,7 @@ int32_t ec_get_real_size_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
}
/* This function is used to get the trusted.ec.size xattr from a file when
- * no lock is needed on the inode. This is only required to maintan iatt
+ * no lock is needed on the inode. This is only required to maintain iatt
* structs on fops that manipulate directory entries but do not operate
* directly on the inode, like link, rename, ...
*
diff --git a/xlators/cluster/ec/src/ec-common.h b/xlators/cluster/ec/src/ec-common.h
index 372be52470c..a92752952ad 100644
--- a/xlators/cluster/ec/src/ec-common.h
+++ b/xlators/cluster/ec/src/ec-common.h
@@ -41,7 +41,7 @@ enum _ec_xattrop_flags {
};
/* We keep two sets of flags. One to determine what's really providing the
- * currect xattrop and the other to know what the parent fop of the xattrop
+ * current xattrop and the other to know what the parent fop of the xattrop
* needs to proceed. It might happen that a fop needs some information that
* is being already requested by a previous fop. The two sets are stored
* contiguously. */
diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
index 2ff713b7bbd..6b4c45fa370 100644
--- a/xlators/cluster/ec/src/ec-heal.c
+++ b/xlators/cluster/ec/src/ec-heal.c
@@ -2933,7 +2933,7 @@ ec_need_data_heal (ec_t *ec, inode_t *inode, default_args_cbk_t *replies,
size = alloca0 (ec->nodes * sizeof (*size));
/* When dd is going on and heal info is called there is a very good
- * chance for on disk sizes to mismatch eventhough nothing is wrong
+ * chance for on disk sizes to mismatch even though nothing is wrong
* we don't need ondisk size check there. But if the file is either
* self-locked or the caller wants a thorough check then make sure to
* perform on disk check also. */
diff --git a/xlators/cluster/ec/src/ec-helpers.h b/xlators/cluster/ec/src/ec-helpers.h
index 71b8978abf8..b548f802361 100644
--- a/xlators/cluster/ec/src/ec-helpers.h
+++ b/xlators/cluster/ec/src/ec-helpers.h
@@ -96,7 +96,7 @@ ec_adjust_size_up(ec_t *ec, uint64_t *value, gf_boolean_t scale)
tmp += ec->fragment_size;
} else {
tmp += ec->stripe_size;
- /* If no scaling is requested there's a posibility of
+ /* If no scaling is requested there's a possibility of
* overflow. */
if (tmp < ec->stripe_size) {
tmp = UINT64_MAX;
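
The ec_adjust_size_up() hunk above is about the wrap-around that can occur when a 64-bit size is rounded up without scaling. A condensed sketch of that saturation, assuming a non-zero power-of-two stripe size and written apart from the real helper:

#include <stdint.h>

/* Round 'value' up to the next multiple of 'stripe_size', saturating at
 * UINT64_MAX when the addition wraps. */
uint64_t
round_up_to_stripe(uint64_t value, uint64_t stripe_size)
{
    uint64_t tmp = value + stripe_size - 1;

    if (tmp < value)  /* the addition wrapped around */
        return UINT64_MAX;

    return tmp & ~(stripe_size - 1);
}
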
diff --git a/xlators/cluster/ec/src/ec-inode-write.c b/xlators/cluster/ec/src/ec-inode-write.c
index bf45a867971..c9fbd3cf7df 100644
--- a/xlators/cluster/ec/src/ec-inode-write.c
+++ b/xlators/cluster/ec/src/ec-inode-write.c
@@ -1167,7 +1167,7 @@ void ec_discard_adjust_offset_size(ec_fop_data_t *fop)
ec_t *ec = fop->xl->private;
fop->user_size = fop->size;
- /* If discard length covers atleast a fragment on brick, we will
+ /* If discard length covers at least a fragment on brick, we will
* perform discard operation(when fop->size is non-zero) else we just
* write zeros.
*/
diff --git a/xlators/cluster/ec/src/ec-locks.c b/xlators/cluster/ec/src/ec-locks.c
index 996035de90b..d8ad7721f53 100644
--- a/xlators/cluster/ec/src/ec-locks.c
+++ b/xlators/cluster/ec/src/ec-locks.c
@@ -43,7 +43,7 @@ int32_t ec_lock_check(ec_fop_data_t *fop, uintptr_t *mask)
case EC_LOCK_MODE_NONE:
case EC_LOCK_MODE_ALL:
/* Goal is to treat non-blocking lock as failure
- * even if there is a signle EAGAIN*/
+ * even if there is a single EAGAIN*/
notlocked |= ans->mask;
break;
}
diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h
index 15b4c77abfe..2724da6415f 100644
--- a/xlators/cluster/ec/src/ec-types.h
+++ b/xlators/cluster/ec/src/ec-types.h
@@ -323,7 +323,7 @@ struct _ec_fop_data {
int32_t id; /* ID of the file operation */
int32_t refs;
int32_t state;
- int32_t minimum; /* Mininum number of successful
+ int32_t minimum; /* Minimum number of successful
operation required to conclude a
fop as successful */
int32_t expected;
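
The 'minimum' field documented above is only meaningful together with a count of successful answers, which EC tracks as subvolume bitmasks (as in the ans->mask usage in the ec-locks.c hunk above). A hypothetical helper pair showing that quorum test, not the real EC completion path:

#include <stdbool.h>
#include <stdint.h>

/* Count how many subvolumes are present in a success bitmask. */
int32_t
count_answers(uintptr_t mask)
{
    int32_t count = 0;

    while (mask != 0) {
        count += (int32_t)(mask & 1);
        mask >>= 1;
    }
    return count;
}

/* A fop is concluded successful only when at least 'minimum' of the
 * answers succeeded. */
bool
fop_has_quorum(uintptr_t success_mask, int32_t minimum)
{
    return count_answers(success_mask) >= minimum;
}
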
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index cb995646959..a82305104c5 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -1543,7 +1543,7 @@ struct volume_options options[] =
.op_version = { GD_OP_VERSION_4_0_0 },
.flags = OPT_FLAG_SETTABLE | OPT_FLAG_CLIENT_OPT | OPT_FLAG_DOC,
.tags = { "disperse", "locks", "timeout" },
- .description = "It's equivalent ot eager-lock-timeout option but for "
+ .description = "It's equivalent to eager-lock-timeout option but for "
"non regular files."
},
{ .key = {"background-heals"},
diff --git a/xlators/cluster/stripe/src/stripe-helpers.c b/xlators/cluster/stripe/src/stripe-helpers.c
index 217f4d2b6e8..06568389cc2 100644
--- a/xlators/cluster/stripe/src/stripe-helpers.c
+++ b/xlators/cluster/stripe/src/stripe-helpers.c
@@ -563,7 +563,7 @@ set_stripe_block_size (xlator_t *this, stripe_private_t *priv, char *data)
if (stripe_opt->block_size < STRIPE_MIN_BLOCK_SIZE) {
gf_log (this->name, GF_LOG_ERROR, "Invalid Block-size: "
- "%s. Should be atleast %llu bytes", num,
+ "%s. Should be at least %llu bytes", num,
STRIPE_MIN_BLOCK_SIZE);
goto out;
}
diff --git a/xlators/cluster/stripe/src/stripe.c b/xlators/cluster/stripe/src/stripe.c
index fae80c3e200..a8534cfca1e 100644
--- a/xlators/cluster/stripe/src/stripe.c
+++ b/xlators/cluster/stripe/src/stripe.c
@@ -4436,7 +4436,7 @@ stripe_setxattr_cbk (call_frame_t *frame, void *cookie,
call_cnt = --local->wind_count;
/**
- * We overwrite ->op_* values here for subsequent faliure
+ * We overwrite ->op_* values here for subsequent failure
* conditions, hence we propagate the last errno down the
* stack.
*/
@@ -5553,7 +5553,7 @@ stripe_getxattr (call_frame_t *frame, xlator_t *this,
(void) strncpy (local->xsel, name, strlen (name));
/**
- * for xattrs that need info from all childs, fill ->xsel
+ * for xattrs that need info from all children, fill ->xsel
* as above and call the filler function in cbk based on
* it
*/