author     Anand Avati <avati@redhat.com>    2012-06-07 00:45:48 -0700
committer  Anand Avati <avati@redhat.com>    2012-06-07 17:35:34 -0700
commit     ed648c3b393ec06d0da7c1a9af42286fb3cc978e (patch)
tree       e4298f43561af76e0d26ce4af2c596bb93c13f8f /xlators/mgmt/glusterd/src/glusterd-handler.c
parent     7efa697c26b31c52f0f8cc976a505ff34bfbe33d (diff)
glusterd: generate node UUID lazily
A commonly faced problem among glusterfs users is: after a fresh installation of glusterfs in a virtual machine, the VM image is cloned to make multiple instances of the server. This breaks glusterd, because glusterd creates the node UUID on the first boot after installation, and every clone inherits it. The result is weird behavior at peer probe time, where glusterd does not (yet) deal with UUID collisions in a user-friendly way.

This patch is the 'prevention' part of the fix. The approach is to avoid generating a UUID on the first start of glusterd, and instead generate the node UUID only when one is actually found to be necessary. This naturally avoids creating the node UUID on first boot and prevents the issue to a large extent.

The issue also needs a 'cure' patch, which would give more meaningful error messages to the user and provide a CLI to recover from such situations (gluster peer reset?).

Change-Id: Ieaaeeaf76ed35385844e98a8e23fc3dd8df5a208
BUG: 811493
Signed-off-by: Anand Avati <avati@redhat.com>
Reviewed-on: http://review.gluster.com/3533
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
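The hunks below mechanically replace direct reads of priv->uuid with the MY_UUID macro this patch introduces. As a rough illustration of the lazy-generation idea, here is a minimal C sketch of the kind of accessor such a macro could expand to; the helper names (glusterd_uuid_init_lazy, glusterd_get_uuid) and the persistence comment are assumptions for illustration, not the exact code from this patch:

    /* Sketch of lazy node-UUID initialization. Assumes a glusterd_conf_t
     * with a uuid_t field; names below are illustrative, not from the patch. */
    #include <uuid/uuid.h>

    typedef struct {
            uuid_t uuid;            /* stays all-zero until first real use */
            /* ... other glusterd private state ... */
    } glusterd_conf_t;

    static glusterd_conf_t conf;    /* stands in for this->private */

    /* Hypothetical helper: create the UUID only on first demand. */
    static void
    glusterd_uuid_init_lazy (glusterd_conf_t *priv)
    {
            if (!uuid_is_null (priv->uuid))
                    return;                 /* already generated */
            uuid_generate (priv->uuid);     /* first caller pays the cost */
            /* a real implementation would also persist the UUID to disk
             * (e.g. the glusterd info file) at this point */
    }

    /* Every access goes through the accessor instead of reading
     * priv->uuid directly, mirroring how the diff routes reads
     * through MY_UUID. */
    static unsigned char *
    glusterd_get_uuid (glusterd_conf_t *priv)
    {
            glusterd_uuid_init_lazy (priv);
            return priv->uuid;
    }

    #define MY_UUID (glusterd_get_uuid (&conf))

With every caller funneled through one accessor, a freshly cloned VM carries an all-zero UUID until the first operation that genuinely needs an identity, so clones no longer inherit a UUID pre-generated at install time.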
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index cc8e2d0eea1..51b859857b2 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -446,7 +446,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx)
priv = this->private;
GF_ASSERT (priv);
- ret = glusterd_lock (priv->uuid);
+ ret = glusterd_lock (MY_UUID);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
@@ -471,7 +471,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx)
out:
if (locked && ret)
- glusterd_unlock (priv->uuid);
+ glusterd_unlock (MY_UUID);
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
@@ -749,7 +749,7 @@ glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
goto out;
}
- if (!uuid_compare (uuid, priv->uuid)) {
+ if (!uuid_compare (uuid, MY_UUID)) {
op_errno = GF_DEPROBE_LOCALHOST;
ret = -1;
goto out;
@@ -1678,7 +1678,7 @@ glusterd_handle_friend_update (rpcsvc_request_t *req)
continue;
}
- if (!uuid_compare (uuid, priv->uuid)) {
+ if (!uuid_compare (uuid, MY_UUID)) {
gf_log ("", GF_LOG_INFO, "Received my uuid as Friend");
i++;
continue;
@@ -1703,7 +1703,7 @@ glusterd_handle_friend_update (rpcsvc_request_t *req)
}
out:
- uuid_copy (rsp.uuid, priv->uuid);
+ uuid_copy (rsp.uuid, MY_UUID);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
if (dict) {