author     Jeff Darcy <jdarcy@redhat.com>    2013-02-20 14:11:36 -0500
committer  Anand Avati <avati@redhat.com>    2013-02-21 17:27:56 -0800
commit     1dbe9a05feac5032990457058f7cef686a293973 (patch)
tree       a66b6420dd244b27f6195d570335df0e3120ae18 /xlators/mgmt/glusterd/src/glusterd-handler.c
parent     673287ae4d265f67a445dedb8ace38b06e72dff7 (diff)
glusterd: allow multiple instances of glusterd on one machine
This is needed to support automated testing of cluster-communication features such as probing and quorum. In order to use this, you need to do the following preparatory steps:

* Copy /var/lib/glusterd to another directory for each virtual host.
* Ensure that each virtual host has a different UUID in its glusterd.info.

Now you can start each copy of glusterd with the following xlator-options:

* management.transport.socket.bind-address=$ip_address
* management.working-directory=$unique_working_directory

You can use 127.x.y.z addresses for binding without needing to assign them to interfaces explicitly. Note that you must initially use addresses, not names, because of some behavior in the socket code that's not worth fixing just for this usage; after that you can use names in /etc/hosts instead.

At this point you can issue CLI commands to a specific glusterd using the --remote-host option. So far probe, volume create/start/stop, mount, and basic I/O all seem to work as expected with multiple instances.

Change-Id: I1beabb44cff8763d2774bc208b2ffcda27c1a550
BUG: 913555
Signed-off-by: Jeff Darcy <jdarcy@redhat.com>
Reviewed-on: http://review.gluster.org/4556
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Avati <avati@redhat.com>
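For concreteness, a minimal shell sketch of the setup described above. The directory /var/lib/glusterd-node2 and the address 127.0.0.2 are illustrative; the option names and the --remote-host flag come from the commit message, and --xlator-option is glusterd's standard way of passing such options.

    # Clone the working directory for a second "virtual host", then edit the
    # copied glusterd.info so the second instance has its own UUID.
    cp -a /var/lib/glusterd /var/lib/glusterd-node2
    $EDITOR /var/lib/glusterd-node2/glusterd.info

    # Start the second instance on its own loopback address and directory.
    glusterd \
        --xlator-option management.transport.socket.bind-address=127.0.0.2 \
        --xlator-option management.working-directory=/var/lib/glusterd-node2

    # Issue CLI commands to a specific instance.
    gluster --remote-host=127.0.0.2 peer status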
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c  26
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 7d39c1e0785..04e5833b732 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -707,6 +707,7 @@ glusterd_handle_cli_probe (rpcsvc_request_t *req)
         glusterd_peerinfo_t *peerinfo = NULL;
         gf_boolean_t run_fsm = _gf_true;
         xlator_t *this = NULL;
+        char *bind_name = NULL;
 
         GF_ASSERT (req);
         this = THIS;
@@ -736,7 +737,16 @@ glusterd_handle_cli_probe (rpcsvc_request_t *req)
         gf_log ("glusterd", GF_LOG_INFO, "Received CLI probe req %s %d",
                 cli_req.hostname, cli_req.port);
 
-        if (glusterd_is_local_addr(cli_req.hostname)) {
+        if (dict_get_str(this->options,"transport.socket.bind-address",
+                         &bind_name) == 0) {
+                gf_log ("glusterd", GF_LOG_DEBUG,
+                        "only checking probe address vs. bind address");
+                ret = glusterd_is_same_address(bind_name,cli_req.hostname);
+        }
+        else {
+                ret = glusterd_is_local_addr(cli_req.hostname);
+        }
+        if (ret) {
                 glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_LOCALHOST, NULL,
                                               cli_req.hostname, cli_req.port);
                 ret = 0;
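In CLI terms, the hunk above changes only how the "is this probe aimed at myself?" check is done: when a bind-address is configured, the probe target is compared against that one address instead of against every local address. A hedged illustration, assuming a second instance on 127.0.0.3 alongside the 127.0.0.2 instance from the sketch above:

    # Probing an instance's own bind address gets the localhost response
    # (GF_PROBE_LOCALHOST) rather than starting a real probe.
    gluster --remote-host=127.0.0.2 peer probe 127.0.0.2

    # Probing the other instance's address proceeds as a normal probe.
    gluster --remote-host=127.0.0.2 peer probe 127.0.0.3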
@@ -2434,6 +2444,7 @@ glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo,
         dict_t *options = NULL;
         int ret = -1;
         glusterd_peerctx_t *peerctx = NULL;
+        data_t *data = NULL;
 
         peerctx = GF_CALLOC (1, sizeof (*peerctx), gf_gld_mt_peerctx_t);
         if (!peerctx)
@@ -2450,6 +2461,19 @@ glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo,
         if (ret)
                 goto out;
 
+        /*
+         * For simulated multi-node testing, we need to make sure that we
+         * create our RPC endpoint with the same address that the peer would
+         * use to reach us.
+         */
+        if (this->options) {
+                data = dict_get(this->options,"transport.socket.bind-address");
+                if (data) {
+                        ret = dict_set(options,
+                                       "transport.socket.source-addr",data);
+                }
+        }
+
         ret = glusterd_rpc_create (&peerinfo->rpc, options,
                                    glusterd_peer_rpc_notify, peerctx);
         if (ret) {
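This second hunk copies the bind-address into each outgoing peer connection as transport.socket.source-addr, so a peer sees the connection coming from the address it knows this instance by. One way to spot-check the effect, assuming the iproute2 ss tool is available (an assumption, not part of the commit):

    # Each instance's peer connections should show its own bind address
    # (e.g. 127.0.0.2) as the local end of the socket.
    ss -tnp | grep glusterd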