author     Atin Mukherjee <amukherj@redhat.com>        2017-10-17 21:32:44 +0530
committer  Atin Mukherjee <amukherj@redhat.com>        2017-10-31 04:36:44 +0000
commit     30e0b86aae00430823f2523c6efa3c4ebbf0a478 (patch)
tree       49920feaa971f799fc3eef7be31fc80743b11f47  /xlators/mgmt/glusterd/src/glusterd-pmap.c
parent     15fe99995b8650a677b097028bc14d61a5dd5e1b (diff)
glusterd: clean up portmap on brick disconnect
GlusterD's portmap entry for a brick is cleaned up when a PMAP_SIGNOUT event is initiated by the brick process at shutdown. But if the brick process crashes or is killed with SIGKILL, this event is never initiated and glusterd ends up with a stale port. Since GlusterD's portmap traversal happens both ways, forward for allocation and backward for registry search, glusterd might end up running with a stale port for a brick, which eventually causes clients to fail to connect to the bricks.

The solution is to clean up the port entry when the process is found to be down, as part of the brick disconnect event. Although this makes the PMAP_SIGNOUT handling redundant in most cases, it is kept as a safeguard against glusterd running into stale port issues.

Change-Id: I04c5be6d11e772ee4de16caf56dbb37d5c944303
BUG: 1503246
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
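Note: this diff is limited to glusterd-pmap.c; the brick-disconnect call site itself lives elsewhere in glusterd and is not shown here. The following is a minimal sketch, with a hypothetical helper name, of how a disconnect handler might use the new brick_disconnect argument:

/* Sketch only -- assumes compilation inside the glusterd source tree; the
 * helper name and its placement are hypothetical, not part of this patch. */
#include "glusterd-pmap.h"

static void
sample_pmap_cleanup_on_disconnect (xlator_t *this, int port,
                                   const char *brickpath)
{
        /* Passing brick_disconnect = _gf_true makes pmap_registry_remove()
         * skip the "other bricks still registered on this port" check and
         * free the whole entry, so a crashed or SIGKILLed brick does not
         * leave a stale port behind. */
        pmap_registry_remove (this, port, brickpath,
                              GF_PMAP_PORT_BRICKSERVER, NULL /* xprt */,
                              _gf_true);
}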
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-pmap.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-pmap.c  26
1 file changed, 17 insertions, 9 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.c b/xlators/mgmt/glusterd/src/glusterd-pmap.c
index 6d8cf3f894f..4f045ab17be 100644
--- a/xlators/mgmt/glusterd/src/glusterd-pmap.c
+++ b/xlators/mgmt/glusterd/src/glusterd-pmap.c
@@ -241,7 +241,8 @@ pmap_assign_port (xlator_t *this, int old_port, const char *path)
 
         if (old_port) {
                 ret = pmap_registry_remove (this, 0, path,
-                                            GF_PMAP_PORT_BRICKSERVER, NULL);
+                                            GF_PMAP_PORT_BRICKSERVER, NULL,
+                                            _gf_false);
                 if (ret) {
                         gf_msg (this->name, GF_LOG_WARNING,
                                 GD_MSG_PMAP_REGISTRY_REMOVE_FAIL, 0, "Failed to"
@@ -344,7 +345,8 @@ pmap_registry_extend (xlator_t *this, int port, const char *brickname)
 
 int
 pmap_registry_remove (xlator_t *this, int port, const char *brickname,
-                      gf_pmap_port_type_t type, void *xprt)
+                      gf_pmap_port_type_t type, void *xprt,
+                      gf_boolean_t brick_disconnect)
 {
         struct pmap_registry *pmap = NULL;
         int                   p = 0;
@@ -391,11 +393,16 @@ remove:
          * can delete the entire entry.
          */
         if (!pmap->ports[p].xprt) {
-                brick_str = pmap->ports[p].brickname;
-                if (brick_str) {
-                        while (*brick_str != '\0') {
-                                if (*(brick_str++) != ' ') {
-                                        goto out;
+                /* If the signout call is being triggered by brick disconnect
+                 * then clean up all the bricks (in case of brick mux)
+                 */
+                if (!brick_disconnect) {
+                        brick_str = pmap->ports[p].brickname;
+                        if (brick_str) {
+                                while (*brick_str != '\0') {
+                                        if (*(brick_str++) != ' ') {
+                                                goto out;
+                                        }
                                 }
                         }
                 }
@@ -550,14 +557,15 @@ __gluster_pmap_signout (rpcsvc_request_t *req)
                 goto fail;
         }
         rsp.op_ret = pmap_registry_remove (THIS, args.port, args.brick,
-                                           GF_PMAP_PORT_BRICKSERVER, req->trans);
+                                           GF_PMAP_PORT_BRICKSERVER, req->trans,
+                                           _gf_false);
 
         ret = glusterd_get_brickinfo (THIS, args.brick, args.port, &brickinfo);
         if (args.rdma_port) {
                 snprintf(brick_path, PATH_MAX, "%s.rdma", args.brick);
                 rsp.op_ret = pmap_registry_remove (THIS, args.rdma_port,
                                 brick_path, GF_PMAP_PORT_BRICKSERVER,
-                                req->trans);
+                                req->trans, _gf_false);
         }
         /* Update portmap status on brickinfo */
         if (brickinfo)