author    Amar Tumballi <amarts@redhat.com>      2017-06-23 13:10:56 +0530
committer Atin Mukherjee <amukherj@redhat.com>   2017-07-24 15:34:34 +0000
commit    febf5ed4848ad705a34413353559482417c61467 (patch)
tree      081447d6844b0bb16622c6bfce9fbb680ad42549 /xlators/mgmt/glusterd/src/glusterd-brick-ops.c
parent    0b3fec6924cad5c9f38941550ab4106972efa5cc (diff)
posix: option to handle the shared bricks for statvfs()
Currently the 'storage/posix' xlator has an option, `export-statfs-size no`, which exports zero as the value of a few fields in `struct statvfs`. When a backend brick is shared between multiple brick processes, the values of these fields should instead be `field_value / number-of-bricks-at-node`. This way, issues such as 'min-free-disk' at different layers are also handled properly when the statfs() syscall is made.

Fixes #241
Change-Id: I2e320e1fdcc819ab9173277ef3498201432c275f
Signed-off-by: Amar Tumballi <amarts@redhat.com>
Reviewed-on: https://review.gluster.org/17618
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Jeff Darcy <jeff@pl.atyp.us>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
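As a rough sketch of the behaviour described above (illustrative only, not the posix xlator code from this patch; the function name, brick path and shared_brick_count parameter are invented for the example), scaling the statvfs() size fields by the number of bricks sharing a filesystem could look like this:

    /* Minimal sketch, assuming a caller already knows how many bricks
     * share the backing filesystem.  Not code from this patch. */
    #include <stdio.h>
    #include <sys/statvfs.h>

    static int
    get_shared_statvfs (const char *brick_path, int shared_brick_count,
                        struct statvfs *out)
    {
            struct statvfs buf = {0,};

            if (statvfs (brick_path, &buf) != 0)
                    return -1;

            if (shared_brick_count > 1) {
                    /* Divide the block counts across the bricks sharing
                     * this filesystem, so free-space accounting reflects
                     * one brick's share rather than the whole disk. */
                    buf.f_blocks /= shared_brick_count;
                    buf.f_bfree  /= shared_brick_count;
                    buf.f_bavail /= shared_brick_count;
            }

            *out = buf;
            return 0;
    }

    int
    main (void)
    {
            struct statvfs svfs = {0,};

            /* "/bricks/b1" and the count of 2 are example values only. */
            if (get_shared_statvfs ("/bricks/b1", 2, &svfs) == 0)
                    printf ("blocks available per brick: %lu\n",
                            (unsigned long) svfs.f_bavail);
            return 0;
    }

With a shared_brick_count of 2, a brick on a shared disk reports half of the real free space, so checks such as 'min-free-disk' operate on the brick's fair share.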
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-brick-ops.c')
 xlators/mgmt/glusterd/src/glusterd-brick-ops.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 8d4ea13af95..c7b618745b3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -22,6 +22,7 @@
#include "glusterd-server-quorum.h"
#include "run.h"
#include "glusterd-volgen.h"
+#include "syscall.h"
#include <sys/signal.h>
/* misc */
@@ -1322,6 +1323,7 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
gf_boolean_t is_valid_add_brick = _gf_false;
+ struct statvfs brickstat = {0,};
this = THIS;
GF_ASSERT (this);
@@ -1396,6 +1398,21 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
if (ret)
goto out;
+ if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) {
+ ret = sys_statvfs (brickinfo->path, &brickstat);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_STATVFS_FAILED,
+ "Failed to fetch disk utilization "
+ "from the brick (%s:%s). Please check the health of "
+ "the brick. Error code was %s",
+ brickinfo->hostname, brickinfo->path,
+ strerror (errno));
+
+ goto out;
+ }
+ brickinfo->statfs_fsid = brickstat.f_fsid;
+ }
/* hot tier bricks are added to head of brick list */
if (dict_get (dict, "attach-tier")) {
cds_list_add (&brickinfo->brick_list, &volinfo->bricks);
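The f_fsid value captured in the hunk above identifies the filesystem backing a brick; comparing it across local bricks is what makes it possible to tell how many bricks share one disk. A self-contained, hypothetical sketch of that comparison (the brick paths are examples and this helper is not part of the patch):

    /* Hypothetical sketch: decide whether two brick paths live on the
     * same backing filesystem by comparing statvfs f_fsid, mirroring
     * what storing statfs_fsid per brick makes possible. */
    #include <stdio.h>
    #include <sys/statvfs.h>

    static int
    bricks_share_filesystem (const char *path_a, const char *path_b)
    {
            struct statvfs a = {0,};
            struct statvfs b = {0,};

            if (statvfs (path_a, &a) != 0 || statvfs (path_b, &b) != 0)
                    return -1;   /* could not stat one of the paths */

            return a.f_fsid == b.f_fsid;
    }

    int
    main (void)
    {
            int shared = bricks_share_filesystem ("/bricks/b1", "/bricks/b2");

            if (shared < 0)
                    perror ("statvfs");
            else
                    printf ("bricks %s the same filesystem\n",
                            shared ? "share" : "do not share");
            return 0;
    }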