path: root/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
author    Anand Avati <avati@redhat.com>    2012-06-07 00:45:48 -0700
committer Anand Avati <avati@redhat.com>    2012-06-07 17:35:34 -0700
commit    ed648c3b393ec06d0da7c1a9af42286fb3cc978e (patch)
tree      e4298f43561af76e0d26ce4af2c596bb93c13f8f /xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
parent    7efa697c26b31c52f0f8cc976a505ff34bfbe33d (diff)
glusterd: generate node UUID lazily
A commonly faced problem among glusterfs users is: after a fresh installation of glusterfs in a virtual machine, the VM image is cloned to make multiple instances of the server. This breaks glusterd, because on the first boot after installation glusterd would already have created the node UUID, and that UUID gets inherited by every clone. The result is weird behavior at peer probe time, since glusterd does not (yet) deal with UUID collisions in a user-friendly way.

This patch is the 'prevention' half of the fix. The approach is to avoid generating a UUID on the first start of glusterd and instead generate the node UUID only when one is actually needed. This naturally avoids creating the node UUID on first boot and prevents the issue to a large extent.

The issue also needs a 'cure' patch, which would give more meaningful error messages to the user and provide a CLI to recover from such situations (gluster peer reset?).

Change-Id: Ieaaeeaf76ed35385844e98a8e23fc3dd8df5a208
BUG: 811493
Signed-off-by: Anand Avati <avati@redhat.com>
Reviewed-on: http://review.gluster.com/3533
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
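The diff below shows only the call sites in this file: direct reads of priv->uuid are replaced with a MY_UUID macro defined elsewhere in the patch, which obtains the node UUID through a helper that generates it on demand. A minimal sketch of that lazy-generation pattern, assuming libuuid; the names below (node_uuid, node_uuid_get, MY_NODE_UUID) are illustrative stand-ins, not the patch's identifiers, and the real glusterd additionally persists the generated UUID so it survives restarts:

    #include <uuid/uuid.h>

    static uuid_t node_uuid;   /* static storage: all-zero until first use */

    static unsigned char *
    node_uuid_get (void)
    {
            /* Generate on first access instead of at daemon startup, so a
             * cloned VM image never ships with a pre-baked node identity. */
            if (uuid_is_null (node_uuid))
                    uuid_generate (node_uuid);
            return node_uuid;
    }

    #define MY_NODE_UUID (node_uuid_get ())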
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-rpc-ops.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 6d89e4c03d9..240f808fe25 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -1460,7 +1460,7 @@ glusterd3_1_probe (call_frame_t *frame, xlator_t *this,
if (ret)
goto out;
- uuid_copy (req.uuid, priv->uuid);
+ uuid_copy (req.uuid, MY_UUID);
req.hostname = gf_strdup (hostname);
req.port = port;
@@ -1505,7 +1505,7 @@ glusterd3_1_friend_add (call_frame_t *frame, xlator_t *this,
if (ret)
goto out;
- uuid_copy (req.uuid, priv->uuid);
+ uuid_copy (req.uuid, MY_UUID);
req.hostname = peerinfo->hostname;
req.port = peerinfo->port;
@@ -1553,7 +1553,7 @@ glusterd3_1_friend_remove (call_frame_t *frame, xlator_t *this,
peerinfo = event->peerinfo;
- uuid_copy (req.uuid, priv->uuid);
+ uuid_copy (req.uuid, MY_UUID);
req.hostname = peerinfo->hostname;
req.port = peerinfo->port;
ret = glusterd_submit_request (peerinfo->rpc, &req, frame, peerinfo->peer,
@@ -1598,7 +1598,7 @@ glusterd3_1_friend_update (call_frame_t *frame, xlator_t *this,
req.friends.friends_val = dict_buf;
req.friends.friends_len = len;
- uuid_copy (req.uuid, priv->uuid);
+ uuid_copy (req.uuid, MY_UUID);
dummy_frame = create_frame (this, this->ctx->pool);
ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
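Every call site in this file now follows the same shape. A hedged usage sketch, reusing the illustrative MY_NODE_UUID macro from the earlier example in place of the patch's MY_UUID:

    /* Illustrative call site mirroring the diff above: the node UUID is
     * materialized on this first read rather than at daemon startup. */
    uuid_copy (req.uuid, MY_NODE_UUID);

Routing all reads through the macro means the first operation that genuinely needs an identity (such as a peer probe) is what triggers generation, which is exactly what makes cloning a freshly installed VM image safe.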