path: root/xlators/mgmt/glusterd/src/glusterd-utils.c
author     Mohit Agrawal <moagrawa@redhat.com>        2017-05-08 19:29:22 +0530
committer  Atin Mukherjee <amukherj@redhat.com>       2017-05-09 01:30:01 +0000
commit     21c7f7baccfaf644805e63682e5a7d2a9864a1e6 (patch)
tree       01bbbd50d13f609eb8f7d2cbe2ce5e3af1652e42 /xlators/mgmt/glusterd/src/glusterd-utils.c
parent     18e07cf01f975c80152e5469fb4e4274f08dc636 (diff)
glusterd: socketfile & pidfile related fixes for brick multiplexing feature
Problem: With brick multiplexing enabled, after glusterd restarts the CLI does not show the pid of every brick process in every volume.

Solution: While brick-mux is on, all local brick processes communicate over one UNIX socket, but the current code (glusterd_brick_start) tries to talk to a separate UNIX socket per volume, whose path is built from the brick name and volume name. Because the multiplexing design opens only one UNIX socket, glusterd hits poller errors and cannot fetch the correct status of the brick processes through the CLI. To resolve this, add a new function, glusterd_get_sock_from_brick_pid, which glusterd_brick_start calls to obtain the socket path actually in use by the running brick process. To avoid continuous EPOLLERR errors in the logs, the socket_connect code is updated as well.

Test: To reproduce the issue, follow these steps:
1) Create two distributed volumes (dist1 and dist2)
2) Set cluster.brick-multiplex to on
3) Kill glusterd
4) Run "gluster v status"
After applying the patch, the correct pid is shown for all volumes.

BUG: 1444596
Change-Id: I5d10af69dea0d0ca19511f43870f34295a54a4d2
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://review.gluster.org/17101
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Prashanth Pai <ppai@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
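As a rough illustration of the approach, the standalone sketch below (the helper name sock_from_pid is hypothetical, not a glusterd API) reads the NUL-separated argument vector in /proc/<pid>/cmdline and returns the argument that follows "-S", which is the UNIX socket path the brick process was started with. The patch's glusterd_get_sock_from_brick_pid performs the same extraction, but by flattening cmdline into one string and scanning between "-S " and "--brick-name".

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

/* Sketch: walk the NUL-separated args in /proc/<pid>/cmdline and copy
 * the argument following "-S" (the brick's UNIX socket path) into
 * sockpath. Returns 0 on success, -1 on failure. */
static int
sock_from_pid (int pid, char *sockpath, size_t len)
{
        char     fname[64] = {0,};
        char     buf[4096] = {0,};
        ssize_t  nread     = 0;
        int      fd        = -1;
        char    *arg       = NULL;
        char    *end       = NULL;

        snprintf (fname, sizeof (fname), "/proc/%d/cmdline", pid);

        fd = open (fname, O_RDONLY);
        if (fd < 0)
                return -1;
        nread = read (fd, buf, sizeof (buf) - 1);
        close (fd);
        if (nread <= 0)
                return -1;

        end = buf + nread;
        for (arg = buf; arg < end; arg += strlen (arg) + 1) {
                /* "-S" and its value are separate argv entries */
                if (strcmp (arg, "-S") == 0 &&
                    arg + strlen (arg) + 1 < end) {
                        snprintf (sockpath, len, "%s",
                                  arg + strlen (arg) + 1);
                        return 0;
                }
        }
        return -1;
}

int
main (int argc, char *argv[])
{
        char sockpath[4096] = {0,};

        if (argc != 2)
                return 1;
        if (sock_from_pid (atoi (argv[1]), sockpath,
                           sizeof (sockpath)) == 0)
                printf ("%s\n", sockpath);
        return 0;
}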
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-utils.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c | 122
1 file changed, 97 insertions(+), 25 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 04dba9d0b41..0a4aea24d85 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -1614,7 +1614,7 @@ glusterd_service_stop (const char *service, char *pidfile, int sig,
goto out;
sleep (1);
- if (gf_is_service_running (pidfile, NULL)) {
+ if (gf_is_service_running (pidfile, &pid)) {
ret = kill (pid, SIGKILL);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, errno,
@@ -1715,6 +1715,8 @@ glusterd_set_brick_socket_filepath (glusterd_volinfo_t *volinfo,
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
int expected_file_len = 0;
+ char export_path[PATH_MAX] = {0,};
+ char sock_filepath[PATH_MAX] = {0,};
expected_file_len = strlen (GLUSTERD_SOCK_DIR) + strlen ("/") +
MD5_DIGEST_LENGTH*2 + strlen (".socket") + 1;
@@ -1725,18 +1727,10 @@ glusterd_set_brick_socket_filepath (glusterd_volinfo_t *volinfo,
priv = this->private;
GLUSTERD_GET_VOLUME_DIR (volume_dir, volinfo, priv);
- if (is_brick_mx_enabled ()) {
- snprintf (sockpath, len, "%s/run/daemon-%s.socket",
- volume_dir, brickinfo->hostname);
- } else {
- char export_path[PATH_MAX] = {0,};
- char sock_filepath[PATH_MAX] = {0,};
- GLUSTERD_REMOVE_SLASH_FROM_PATH (brickinfo->path, export_path);
- snprintf (sock_filepath, PATH_MAX, "%s/run/%s-%s",
- volume_dir, brickinfo->hostname, export_path);
-
- glusterd_set_socket_filepath (sock_filepath, sockpath, len);
- }
+ GLUSTERD_REMOVE_SLASH_FROM_PATH (brickinfo->path, export_path);
+ snprintf (sock_filepath, PATH_MAX, "%s/run/%s-%s",
+ volume_dir, brickinfo->hostname, export_path);
+ glusterd_set_socket_filepath (sock_filepath, sockpath, len);
}
/* connection happens only if it is not already connected,
@@ -1830,6 +1824,7 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo,
char rdma_brick_path[PATH_MAX] = {0,};
struct rpc_clnt *rpc = NULL;
rpc_clnt_connection_t *conn = NULL;
+ int pid = -1;
GF_ASSERT (volinfo);
GF_ASSERT (brickinfo);
@@ -1852,7 +1847,7 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo,
}
GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, brickinfo, priv);
- if (gf_is_service_running (pidfile, NULL)) {
+ if (gf_is_service_running (pidfile, &pid)) {
goto connect;
}
@@ -5044,8 +5039,6 @@ attach_brick (xlator_t *this,
GLUSTERD_GET_BRICK_PIDFILE (pidfile1, other_vol, other_brick, conf);
GLUSTERD_GET_BRICK_PIDFILE (pidfile2, volinfo, brickinfo, conf);
- (void) sys_unlink (pidfile2);
- (void) sys_link (pidfile1, pidfile2);
if (volinfo->is_snap_volume) {
snprintf (full_id, sizeof(full_id), "/%s/%s/%s.%s.%s",
@@ -5065,6 +5058,10 @@ attach_brick (xlator_t *this,
GLUSTERD_BRICK_ATTACH);
rpc_clnt_unref (rpc);
if (!ret) {
+ /* The PID file is copied only once the brick has attached
+ successfully
+ */
+ glusterd_copy_file (pidfile1, pidfile2);
return 0;
}
}
@@ -5284,6 +5281,80 @@ find_compatible_brick (glusterd_conf_t *conf,
return NULL;
}
+/* The function below populates sockpath from the brick pid passed as
+ an argument, after validating the value against /proc
+*/
+
+void
+glusterd_get_sock_from_brick_pid (int pid, char *sockpath, size_t len)
+{
+ char fname[128] = {0,};
+ char buf[1024] = {0,};
+ char cmdline[2048] = {0,};
+ xlator_t *this = NULL;
+ int fd = -1;
+ int i = 0, j = 0;
+ char *ptr = NULL;
+ char *brptr = NULL;
+ char tmpsockpath[PATH_MAX] = {0,};
+ ssize_t blen = 0; /* sys_read can return -1 */
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ snprintf(fname, sizeof(fname), "/proc/%d/cmdline", pid);
+
+ if (sys_access (fname, R_OK) != 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "brick process %d is not running", pid);
+ return;
+ }
+
+ fd = open(fname, O_RDONLY);
+ if (fd != -1) {
+ blen = sys_read(fd, buf, sizeof(buf) - 1);
+ } else {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to open file %s: %s", fname,
+ strerror (errno));
+ return;
+ }
+
+ /* convert cmdline to single string */
+ for (i = 0, j = 0; i < blen; i++) {
+ if (buf[i] == '\0')
+ cmdline[j++] = ' ';
+ else if (buf[i] < 32 || buf[i] > 126) /* remove control char */
+ continue;
+ else if (buf[i] == '"' || buf[i] == '\\') {
+ cmdline[j++] = '\\';
+ cmdline[j++] = buf[i];
+ } else {
+ cmdline[j++] = buf[i];
+ }
+ }
+ cmdline[j] = '\0';
+ if (fd >= 0)
+ sys_close(fd);
+
+ ptr = strstr(cmdline, "-S ");
+ if (!ptr)
+ return;
+ ptr = strchr(ptr, '/');
+ if (!ptr)
+ return;
+ brptr = strstr(ptr, "--brick-name");
+ if (!brptr)
+ return;
+ i = 0;
+
+ while (ptr < brptr) {
+ if (*ptr != ' ')
+ tmpsockpath[i++] = *ptr;
+ ptr++;
+ }
+
+ if (tmpsockpath[0]) {
+ snprintf (sockpath, len, "%s", tmpsockpath);
+ }
+
+}
+
+
int
glusterd_brick_start (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo,
@@ -5295,7 +5366,6 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
glusterd_conf_t *conf = NULL;
int32_t pid = -1;
char pidfile[PATH_MAX] = {0};
- FILE *fp;
char socketpath[PATH_MAX] = {0};
glusterd_volinfo_t *other_vol;
@@ -5349,8 +5419,16 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
* same port (on another brick) and re-use that.
* TBD: re-use RPC connection across bricks
*/
- glusterd_set_brick_socket_filepath (volinfo, brickinfo,
- socketpath, sizeof (socketpath));
+ if (is_brick_mx_enabled ())
+ glusterd_get_sock_from_brick_pid (pid, socketpath,
+ sizeof(socketpath));
+ else
+ glusterd_set_brick_socket_filepath (volinfo, brickinfo,
+ socketpath,
+ sizeof (socketpath));
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Using %s as sockfile for brick %s of volume %s ",
+ socketpath, brickinfo->path, volinfo->volname);
(void) glusterd_brick_connect (volinfo, brickinfo,
socketpath);
}
@@ -5389,12 +5467,6 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
*
* TBD: pray for GlusterD 2 to be ready soon.
*/
- (void) sys_unlink (pidfile);
- fp = fopen (pidfile, "w+");
- if (fp) {
- (void) fprintf (fp, "0\n");
- (void) fclose (fp);
- }
ret = glusterd_volume_start_glusterfs (volinfo, brickinfo, wait);
if (ret) {