author     Anand Subramanian <anands@redhat.com>    2014-05-15 08:19:14 +0530
committer  Vijay Bellur <vbellur@redhat.com>        2014-06-15 10:33:23 -0700
commit     ec845d93e74644bbfe27e0b085a38dbd7c02480f (patch)
tree       e4e455e9f9d6f75a3426fcad4a6360c885890902
parent     056b99bfbc181a88579b8139d444f7dd2fa3b19e (diff)
Get snapshot info dynamically via new rpc and infra for snapview-server to refresh snaplist
BUG: 1105439
Change-Id: I4bb312a53d88f6f4955e69a3ef2b4955ec17f26d
Signed-off-by: Anand Subramanian <anands@redhat.com>
Reviewed-on: http://review.gluster.org/8001
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
-rw-r--r--  rpc/rpc-lib/src/protocol-common.h                                    7
-rw-r--r--  rpc/xdr/src/glusterfs3-xdr.x                                        12
-rw-r--r--  xlators/features/snapview-client/src/snapview-client.c              4
-rw-r--r--  xlators/features/snapview-server/src/snapview-server-mem-types.h    1
-rw-r--r--  xlators/features/snapview-server/src/snapview-server.c            627
-rw-r--r--  xlators/features/snapview-server/src/snapview-server.h             62
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handshake.c                     114
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c                       96
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h                           7
9 files changed, 835 insertions, 95 deletions
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index 00ba2d1fc95..e2b6170b954 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -70,6 +70,7 @@ enum gf_handshake_procnum {
GF_HNDSK_SET_LK_VER,
GF_HNDSK_EVENT_NOTIFY,
GF_HNDSK_GET_VOLUME_INFO,
+ GF_HNDSK_GET_SNAPSHOT_INFO,
GF_HNDSK_MAXVALUE,
};
@@ -263,6 +264,12 @@ enum gf_get_volume_info_type {
typedef enum gf_get_volume_info_type gf_get_volume_info_type;
+
+enum gf_get_snapshot_info_type {
+ GF_GET_SNAPSHOT_LIST,
+};
+typedef enum gf_get_snapshot_info_type gf_get_snapshot_info_type;
+
#define GLUSTER_HNDSK_PROGRAM 14398633 /* Completely random */
#define GLUSTER_HNDSK_VERSION 2 /* 0.0.2 */
diff --git a/rpc/xdr/src/glusterfs3-xdr.x b/rpc/xdr/src/glusterfs3-xdr.x
index b2fa4de9737..a68fcae7e1c 100644
--- a/rpc/xdr/src/glusterfs3-xdr.x
+++ b/rpc/xdr/src/glusterfs3-xdr.x
@@ -758,3 +758,15 @@ struct gf_event_notify_rsp {
int op_errno;
opaque dict<>;
};
+
+
+struct gf_getsnap_name_uuid_req {
+ opaque dict<>;
+};
+
+struct gf_getsnap_name_uuid_rsp {
+ int op_ret;
+ int op_errno;
+ string op_errstr<>;
+ opaque dict<>;
+};
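These two structures are compiled by rpcgen into matching xdr_gf_getsnap_name_uuid_req/_rsp routines, which the rest of this patch drives through the generic XDR helpers. A minimal sketch of the round trip under those assumptions (iov is taken to wrap a buffer of at least xdr_sizeof() bytes, as svs_mgmt_submit_request arranges later in this patch; error handling elided):

    /* Encode a request into iov: returns bytes written, -1 on error */
    gf_getsnap_name_uuid_req req = {{0,}};
    ssize_t len = xdr_serialize_generic (iov, &req,
                           (xdrproc_t)xdr_gf_getsnap_name_uuid_req);

    /* Decode a response from iov: returns < 0 on error */
    gf_getsnap_name_uuid_rsp rsp = {0,};
    int ret = xdr_to_generic (iov, &rsp,
                           (xdrproc_t)xdr_gf_getsnap_name_uuid_rsp);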
diff --git a/xlators/features/snapview-client/src/snapview-client.c b/xlators/features/snapview-client/src/snapview-client.c
index a86339901b9..5ee87ad64ab 100644
--- a/xlators/features/snapview-client/src/snapview-client.c
+++ b/xlators/features/snapview-client/src/snapview-client.c
@@ -159,6 +159,8 @@ svc_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
do_unwind = _gf_false;
}
}
+ gf_log (this->name, GF_LOG_WARNING,
+ "Lookup on normal graph failed");
goto out;
}
@@ -177,6 +179,8 @@ svc_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
if (ret < 0) {
op_ret = -1;
op_errno = EINVAL;
+ gf_log (this->name, GF_LOG_WARNING,
+ "Error fetching parent context");
goto out;
}
}
diff --git a/xlators/features/snapview-server/src/snapview-server-mem-types.h b/xlators/features/snapview-server/src/snapview-server-mem-types.h
index 6820b5e16f1..a8035165000 100644
--- a/xlators/features/snapview-server/src/snapview-server-mem-types.h
+++ b/xlators/features/snapview-server/src/snapview-server-mem-types.h
@@ -18,6 +18,7 @@ enum snapview_mem_types {
gf_svs_mt_svs_inode_t,
gf_svs_mt_dirents_t,
gf_svs_mt_svs_fd_t,
+ gf_svs_mt_snaplist_t,
gf_svs_mt_end
};
diff --git a/xlators/features/snapview-server/src/snapview-server.c b/xlators/features/snapview-server/src/snapview-server.c
index 188aff1fbd7..833ffc9cde8 100644
--- a/xlators/features/snapview-server/src/snapview-server.c
+++ b/xlators/features/snapview-server/src/snapview-server.c
@@ -15,9 +15,447 @@
#include "snapview-server.h"
#include "snapview-server-mem-types.h"
-/*
- * Helper functions
- */
+#include "xlator.h"
+#include "rpc-clnt.h"
+#include "xdr-generic.h"
+#include "protocol-common.h"
+#include <pthread.h>
+
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t condvar = PTHREAD_COND_INITIALIZER;
+static gf_boolean_t snap_worker_resume;
+
+void
+snaplist_refresh (void *data)
+{
+ xlator_t *this = NULL;
+ int ret = 0;
+ svs_private_t *priv = NULL;
+
+ this = data;
+ priv = this->private;
+
+ ret = svs_get_snapshot_list (this);
+ if (ret) {
+ gf_log ("snapview-server", GF_LOG_WARNING,
+ "Error retrieving refreshed snapshot list");
+ }
+
+ return;
+}
+
+void *
+snaplist_worker (void *data)
+{
+ xlator_t *this = NULL;
+ int ret = 0;
+ struct timespec timeout = {0, };
+ svs_private_t *priv = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+
+ this = data;
+ priv = this->private;
+ ctx = this->ctx;
+ GF_ASSERT (ctx);
+
+ pthread_mutex_lock (&priv->snaplist_lock);
+ priv->is_snaplist_done = 1;
+ pthread_mutex_unlock (&priv->snaplist_lock);
+
+ while (1) {
+ timeout.tv_sec = 300;
+ timeout.tv_nsec = 0;
+ priv->snap_timer = gf_timer_call_after (ctx, timeout,
+ snaplist_refresh,
+ data);
+
+ pthread_mutex_lock (&mutex);
+ while (!snap_worker_resume) {
+ pthread_cond_wait (&condvar, &mutex);
+ }
+ snap_worker_resume = _gf_false;
+ pthread_mutex_unlock (&mutex);
+ }
+
+ return NULL;
+}
+
+int
+svs_mgmt_submit_request (void *req, call_frame_t *frame,
+ glusterfs_ctx_t *ctx,
+ rpc_clnt_prog_t *prog, int procnum,
+ fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
+{
+ int ret = -1;
+ int count = 0;
+ struct iovec iov = {0, };
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ ssize_t xdr_size = 0;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", frame, out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", req, out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", ctx, out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", prog, out);
+
+ GF_ASSERT (frame->this);
+
+ iobref = iobref_new ();
+ if (!iobref) {
+ goto out;
+ }
+
+ if (req) {
+ xdr_size = xdr_sizeof (xdrproc, req);
+
+ iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size);
+ if (!iobuf) {
+ goto out;
+ }
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_pagesize (iobuf);
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic (iov, req, xdrproc);
+ if (ret == -1) {
+ gf_log (frame->this->name, GF_LOG_WARNING,
+ "Failed to create XDR payload");
+ goto out;
+ }
+ iov.iov_len = ret;
+ count = 1;
+ }
+
+ ret = rpc_clnt_submit (ctx->mgmt, prog, procnum, cbkfn,
+ &iov, count,
+ NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
+
+out:
+ if (iobref)
+ iobref_unref (iobref);
+
+ if (iobuf)
+ iobuf_unref (iobuf);
+ return ret;
+}
+
+
+int mgmt_get_snapinfo_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf_getsnap_name_uuid_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0;
+ dict_t *dict = NULL;
+ char key[1024] = {0};
+ int snapcount = 0;
+ svs_private_t *priv = NULL;
+ xlator_t *this = NULL;
+ int i = 0;
+ int j = 0;
+ char *value = NULL;
+ snap_dirent_t *dirents = NULL;
+ snap_dirent_t *old_dirents = NULL;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", req, error_out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", myframe, error_out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", iov, error_out);
+
+ frame = myframe;
+ this = frame->this;
+ ctx = frame->this->ctx;
+ priv = this->private;
+ old_dirents = priv->dirents;
+
+ if (!ctx) {
+ gf_log (frame->this->name, GF_LOG_ERROR, "NULL context");
+ errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
+
+ if (-1 == req->rpc_status) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "RPC call failed");
+ errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gf_getsnap_name_uuid_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response, rsp.op_ret = %d",
+ rsp.op_ret);
+ goto out;
+ }
+
+ if (rsp.op_ret == -1) {
+ errno = rsp.op_errno;
+ ret = -1;
+ goto out;
+ }
+
+ if (!rsp.dict.dict_len) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Response dict is not populated");
+ ret = -1;
+ errno = EINVAL;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to unserialize dictionary");
+ errno = EINVAL;
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "snap-count", (int32_t*)&snapcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snapcount");
+ errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
+
+ pthread_mutex_lock (&priv->snaplist_lock);
+
+ /*
+ * Allocate a fresh dirents array for the fetched snap list;
+ * the same allocation covers both the first fetch and every
+ * subsequent run-time refresh.
+ */
+ dirents = GF_CALLOC (snapcount, sizeof (snap_dirent_t),
+ gf_svs_mt_dirents_t);
+ if (!dirents) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Unable to allocate memory");
+ errno = ENOMEM;
+ ret = -1;
+ goto unlock;
+ }
+
+ for (i = 0; i < snapcount; i++) {
+ snprintf (key, sizeof (key), "snap-volname.%d", i+1);
+ ret = dict_get_str (dict, key, &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snap volname %d", i+1);
+ errno = EINVAL;
+ ret = -1;
+ goto unlock;
+ }
+ strncpy (dirents[i].snap_volname, value,
+ sizeof (dirents[i].snap_volname));
+
+ snprintf (key, sizeof (key), "snap-id.%d", i+1);
+ ret = dict_get_str (dict, key, &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snap uuid %d", i+1);
+ errno = EINVAL;
+ ret = -1;
+ goto unlock;
+ }
+ strncpy (dirents[i].uuid, value, sizeof (dirents[i].uuid));
+
+ snprintf (key, sizeof (key), "snapname.%d", i+1);
+ ret = dict_get_str (dict, key, &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snap name %d", i+1);
+ errno = EINVAL;
+ ret = -1;
+ goto unlock;
+ }
+ strncpy (dirents[i].name, value, sizeof (dirents[i].name));
+ }
+
+ /*
+ * The new snap list is now populated in dirents. It is either
+ * a subset or a superset of the existing snaplist old_dirents,
+ * which holds priv->num_snaps entries.
+ *
+ * If it is a subset, clean up the fs handles for entries that
+ * are no longer relevant.
+ *
+ * For entries present in both lists, carry over the fs handle
+ * already assigned in old_dirents to the new dirents entry.
+ *
+ * We do this because we don't want to perform fresh
+ * glfs_init()s repeatedly while the dirents entries for
+ * snapshot volumes get repeatedly cleaned up and reallocated;
+ * failing to reuse the handles would leak memory.
+ */
+ for (i = 0; i < priv->num_snaps; i++) {
+ for (j = 0; j < snapcount; j++) {
+ if ((!strcmp (old_dirents[i].name,
+ dirents[j].name)) &&
+ (!strcmp (old_dirents[i].uuid,
+ dirents[j].uuid))) {
+ dirents[j].fs = old_dirents[i].fs;
+ old_dirents[i].fs = NULL;
+ break;
+ }
+ }
+ }
+
+ if (old_dirents) {
+ for (i=0; i < priv->num_snaps; i++) {
+ if (old_dirents[i].fs)
+ glfs_fini (old_dirents[i].fs);
+ }
+ }
+
+ priv->dirents = dirents;
+ priv->num_snaps = snapcount;
+
+ GF_FREE (old_dirents);
+
+ ret = 0;
+
+unlock:
+ /*
+ * Unlock the snaplist_lock here for two reasons:
+ * 1. We want to avoid nested locks
+ * 2. The snaplist_lock and the mutex protecting the condvar
+ * are independent of each other and should not be
+ * mixed together
+ */
+ pthread_mutex_unlock (&priv->snaplist_lock);
+
+out:
+ pthread_mutex_lock (&mutex);
+ snap_worker_resume = _gf_true;
+ if (priv->is_snaplist_done) {
+ /*
+ * No need to signal on the first fetch of the
+ * snaplist, as no thread is waiting on the condvar
+ * yet; only after snaplist_worker has started is
+ * there a waiter to wake up.
+ */
+ pthread_cond_signal (&condvar);
+ }
+ pthread_mutex_unlock (&mutex);
+
+ if (dict) {
+ dict_unref (dict);
+ }
+ free (rsp.dict.dict_val);
+ free (rsp.op_errstr);
+
+ if (myframe)
+ SVS_STACK_DESTROY (myframe);
+
+error_out:
+ return ret;
+}
+
+int
+svs_get_snapshot_list (xlator_t *this)
+{
+ gf_getsnap_name_uuid_req req = {{0,}};
+ int ret = 0;
+ dict_t *dict = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ call_frame_t *frame = NULL;
+ svs_private_t *priv = NULL;
+
+ ctx = this->ctx;
+ if (!ctx) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "ctx is NULL");
+ ret = -1;
+ goto out;
+ }
+
+ frame = create_frame (this, ctx->pool);
+ if (!frame) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error allocating frame");
+ ret = -1;
+ goto out;
+ }
+
+ priv = this->private;
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto frame_destroy;
+ }
+
+ ret = dict_set_str (dict, "volname", priv->volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error setting volname in dict");
+ goto frame_destroy;
+ }
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to serialize dictionary");
+ ret = -1;
+ goto frame_destroy;
+ }
+
+ ret = svs_mgmt_submit_request (&req, frame, ctx,
+ &svs_clnt_handshake_prog,
+ GF_HNDSK_GET_SNAPSHOT_INFO,
+ mgmt_get_snapinfo_cbk,
+ (xdrproc_t)xdr_gf_getsnap_name_uuid_req);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error sending snapshot names RPC request");
+ goto frame_destroy;
+ }
+
+out:
+ if (dict) {
+ dict_unref (dict);
+ }
+ GF_FREE (req.dict.dict_val);
+
+ return ret;
+
+frame_destroy:
+ /*
+ * Destroy the frame only when we encountered an error;
+ * on success it is cleaned up in mgmt_get_snapinfo_cbk.
+ */
+ SVS_STACK_DESTROY (frame);
+ goto out;
+}
int
__svs_inode_ctx_set (xlator_t *this, inode_t *inode, svs_inode_t *svs_inode)
@@ -411,7 +849,13 @@ svs_get_snap_dirent (xlator_t *this, const char *name)
private = this->private;
+ pthread_mutex_lock (&private->snaplist_lock);
+
dirents = private->dirents;
+ if (!dirents) {
+ pthread_mutex_unlock (&private->snaplist_lock);
+ goto out;
+ }
tmp_dirent = dirents;
for (i = 0; i < private->num_snaps; i++) {
@@ -422,6 +866,8 @@ svs_get_snap_dirent (xlator_t *this, const char *name)
tmp_dirent++;
}
+ pthread_mutex_unlock (&private->snaplist_lock);
+
out:
return dirent;
}
@@ -457,7 +903,7 @@ svs_initialise_snapshot_volume (xlator_t *this, const char *name)
}
snprintf (volname, sizeof (volname), "/snaps/%s/%s",
- dirent->name, dirent->uuid);
+ dirent->name, dirent->snap_volname);
fs = glfs_new (volname);
if (!fs) {
@@ -484,7 +930,8 @@ svs_initialise_snapshot_volume (xlator_t *this, const char *name)
}
snprintf (logfile, sizeof (logfile),
- DEFAULT_SVD_LOG_FILE_DIRECTORY "/%s-%s.log", name, dirent->uuid);
+ DEFAULT_SVD_LOG_FILE_DIRECTORY "/%s-%s.log",
+ name, dirent->uuid);
ret = glfs_set_logging(fs, logfile, loglevel);
if (ret) {
@@ -513,19 +960,21 @@ svs_get_latest_snap_entry (xlator_t *this)
svs_private_t *priv = NULL;
snap_dirent_t *dirents = NULL;
snap_dirent_t *dirent = NULL;
- snap_dirent_t *tmp_dirent = NULL;
GF_VALIDATE_OR_GOTO ("svs", this, out);
priv = this->private;
+ pthread_mutex_lock (&priv->snaplist_lock);
dirents = priv->dirents;
-
- if (priv->num_snaps && dirents) {
- tmp_dirent = &dirents[priv->num_snaps - 1];
- dirent = tmp_dirent;
+ if (!dirents) {
+ pthread_mutex_unlock (&priv->snaplist_lock);
+ goto out;
}
+ if (priv->num_snaps)
+ dirent = &dirents[priv->num_snaps - 1];
+ pthread_mutex_unlock (&priv->snaplist_lock);
out:
return dirent;
}
@@ -535,13 +984,18 @@ svs_get_latest_snapshot (xlator_t *this)
{
glfs_t *fs = NULL;
snap_dirent_t *dirent = NULL;
+ svs_private_t *priv = NULL;
GF_VALIDATE_OR_GOTO ("svs", this, out);
+ priv = this->private;
dirent = svs_get_latest_snap_entry (this);
- if (dirent)
+ if (dirent) {
+ pthread_mutex_lock (&priv->snaplist_lock);
fs = dirent->fs;
+ pthread_mutex_unlock (&priv->snaplist_lock);
+ }
out:
return fs;
@@ -991,7 +1445,7 @@ svs_lookup (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
if (loc->name && strlen (loc->name)) {
ret = dict_get_str_boolean (xdata, "entry-point", _gf_false);
if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR, "failed to get the "
+ gf_log (this->name, GF_LOG_DEBUG, "failed to get the "
"entry point info");
entry_point = _gf_false;
} else {
@@ -1013,8 +1467,10 @@ svs_lookup (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
/* Initialize latest snapshot, which is used for nameless lookups */
dirent = svs_get_latest_snap_entry (this);
- if (dirent && !dirent->fs)
+
+ if (dirent && !dirent->fs) {
fs = svs_initialise_snapshot_volume (this, dirent->name);
+ }
/* lookup is on the entry point to the snapshot world */
if (entry_point) {
@@ -1459,68 +1915,6 @@ out:
return 0;
}
-/* As of now, the list of snapshots is obtained by reading a predefined file
- (which the user has to generate using these commands)
-* /usr/local/sbin/gluster snapshot info | grep -i snap | grep -i Volume |
- grep -i Name | cut -d':' -f 2 > /tmp/tmp-snap-uuids
- /usr/local/sbin/gluster snapshot info | grep Snapshot | cut -d':' -f 2
- > /tmp/tmp-snap_names
- This is a temporary workaround which will be changed to a notification
- based mechanism where snapd gets the list of snapshots from glusterd
-*/
-int
-svs_get_snapshot_list (xlator_t *this, svs_private_t *priv)
-{
- int ret = -1;
- char str_uuid[256] = {'\0'};
- char str_name[256] = {'\0'};
- int snap_count = 0;
- snap_dirent_t *dirents = NULL;
- FILE *fpn = NULL;
- FILE *fpu = NULL;
- int i = 0;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
-
- dirents = GF_CALLOC (sizeof (*dirents), SNAP_VIEW_MAX_NUM_SNAPS,
- gf_svs_mt_dirents_t);
- if (!dirents) {
- gf_log (this->name, GF_LOG_ERROR, "failed to allocate memory");
- goto out;
- /* error, bail */
- }
- priv->dirents = dirents;
-
- fpu = fopen ("/tmp/tmp-snap-uuids", "r+");
- fpn = fopen ("/tmp/tmp-snap_names", "r+");
-
- if (!fpu || !fpn) {
- gf_log (this->name, GF_LOG_ERROR, "failed to open the file");
- goto out;
- }
-
- while ((fscanf (fpu, "%s", str_uuid) != -1) &&
- (fscanf (fpn, "%s", str_name) != -1)) {
- strncpy (dirents[i].uuid, str_uuid, strlen (str_uuid) + 1);
- strncpy (dirents[i].name, str_name, strlen (str_name) + 1);
- ++snap_count;
- ++i;
- }
- priv->num_snaps = snap_count;
-
- fclose (fpn);
- fclose (fpu);
-
- ret = 0;
-
-out:
- if (ret)
- GF_FREE (dirents);
-
- return ret;
-}
-
int
svs_fill_readdir (xlator_t *this, gf_dirent_t *entries, size_t size, off_t off)
{
@@ -1536,20 +1930,23 @@ svs_fill_readdir (xlator_t *this, gf_dirent_t *entries, size_t size, off_t off)
GF_VALIDATE_OR_GOTO ("snap-view-daemon", entries, out);
priv = this->private;
+ GF_ASSERT (priv);
+
/* create the dir entries */
+ pthread_mutex_lock (&priv->snaplist_lock);
dirents = priv->dirents;
for (i = off; i < priv->num_snaps; ) {
this_size = sizeof (gf_dirent_t) +
strlen (dirents[i].name) + 1;
if (this_size + filled_size > size )
- goto out;
+ goto unlock;
entry = gf_dirent_for_name (dirents[i].name);
if (!entry) {
gf_log (this->name, GF_LOG_ERROR, "failed to allocate "
"dentry for %s", dirents[i].name);
- goto out;
+ goto unlock;
}
entry->d_off = i + 1;
@@ -1561,6 +1958,9 @@ svs_fill_readdir (xlator_t *this, gf_dirent_t *entries, size_t size, off_t off)
filled_size += this_size;
}
+unlock:
+ pthread_mutex_unlock (&priv->snaplist_lock);
+
out:
return count;
}
@@ -2347,8 +2747,9 @@ mem_acct_init (xlator_t *this)
int32_t
init (xlator_t *this)
{
- svs_private_t *priv = NULL;
- int ret = -1;
+ svs_private_t *priv = NULL;
+ int ret = -1;
+ pthread_t snap_thread;
/* This can be the top of graph in certain cases */
if (!this->parents) {
@@ -2356,19 +2757,47 @@ init (xlator_t *this)
"dangling volume. check volfile ");
}
- /* TODO: define a mem-type structure */
priv = GF_CALLOC (1, sizeof (*priv), gf_svs_mt_priv_t);
if (!priv)
goto out;
this->private = priv;
+ GF_OPTION_INIT ("volname", priv->volname, str, out);
+ pthread_mutex_init (&(priv->snaplist_lock), NULL);
+ priv->is_snaplist_done = 0;
+ priv->num_snaps = 0;
+ snap_worker_resume = _gf_false;
+
/* get the list of snaps first to return to client xlator */
- ret = svs_get_snapshot_list (this, priv);
+ ret = svs_get_snapshot_list (this);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error initializing snaplist infrastructure");
+ ret = -1;
+ goto out;
+ }
+
+ if ((ret = pthread_attr_init (&priv->thr_attr)) != 0) {
+ gf_log (this->name, GF_LOG_ERROR, "pthread attr init failed");
+ goto out;
+ }
+
+ ret = gf_thread_create (&snap_thread,
+ &priv->thr_attr,
+ snaplist_worker,
+ this);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to create snaplist worker thread");
+ goto out;
+ }
ret = 0;
+
out:
if (ret && priv) {
+ GF_FREE (priv->dirents);
GF_FREE (priv);
}
@@ -2378,14 +2807,48 @@ out:
void
fini (xlator_t *this)
{
- svs_private_t *priv = NULL;
+ svs_private_t *priv = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0;
+
+ GF_ASSERT (this);
priv = this->private;
this->private = NULL;
+ ctx = this->ctx;
+ if (!ctx)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Invalid ctx found");
if (priv) {
+ gf_timer_call_cancel (ctx, priv->snap_timer);
+ priv->snap_timer = NULL;
+ ret = pthread_mutex_destroy (&priv->snaplist_lock);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Could not destroy mutex snaplist_lock");
+ }
+ ret = pthread_attr_destroy (&priv->thr_attr);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Could not destroy pthread attr");
+ }
+ if (priv->dirents) {
+ GF_FREE (priv->dirents);
+ }
GF_FREE (priv);
}
+ ret = pthread_mutex_destroy (&mutex);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Could not destroy mutex");
+ }
+ ret = pthread_cond_destroy (&condvar);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Could not destroy condition variable");
+ }
+
return;
}
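Taken together, the refresh infrastructure in this file works as follows: init() fetches the initial snap list synchronously and spawns snaplist_worker, which arms a 300-second gf_timer; each expiry runs snaplist_refresh(), whose RPC callback mgmt_get_snapinfo_cbk swaps in the new dirents under snaplist_lock and then signals the condvar so the worker loops and re-arms the timer. Consequently every reader of priv->dirents must take the same lock, as svs_get_snap_dirent and svs_fill_readdir do above. A condensed sketch of that reader-side discipline, using a hypothetical helper name (svs_snap_fs_at is not part of this patch):

    /* Hypothetical reader: any access to priv->dirents or
     * priv->num_snaps must happen under snaplist_lock, since
     * mgmt_get_snapinfo_cbk may swap the array out at any time. */
    static glfs_t *
    svs_snap_fs_at (svs_private_t *priv, int idx)
    {
            glfs_t *fs = NULL;

            pthread_mutex_lock (&priv->snaplist_lock);
            if (priv->dirents && idx >= 0 && idx < priv->num_snaps)
                    fs = priv->dirents[idx].fs;
            pthread_mutex_unlock (&priv->snaplist_lock);

            return fs;
    }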
diff --git a/xlators/features/snapview-server/src/snapview-server.h b/xlators/features/snapview-server/src/snapview-server.h
index b0e051e0fc3..510599c3650 100644
--- a/xlators/features/snapview-server/src/snapview-server.h
+++ b/xlators/features/snapview-server/src/snapview-server.h
@@ -35,14 +35,51 @@
#include "glusterfs-acl.h"
#include "syncop.h"
#include "list.h"
-
-/*
- * The max number of snap entries we consider currently
- */
-#define SNAP_VIEW_MAX_NUM_SNAPS 128
+#include "timer.h"
+#include "rpc-clnt.h"
+#include "protocol-common.h"
#define DEFAULT_SVD_LOG_FILE_DIRECTORY DATADIR "/log/glusterfs"
+#define SNAP_VIEW_MAX_GLFS_T 256
+#define SNAP_VIEW_MAX_GLFS_FDS 1024
+#define SNAP_VIEW_MAX_GLFS_OBJ_HANDLES 1024
+
+#define SVS_STACK_DESTROY(_frame) \
+ do { \
+ ((call_frame_t *)_frame)->local = NULL; \
+ STACK_DESTROY (((call_frame_t *)_frame)->root); \
+ } while (0)
+
+int
+svs_mgmt_submit_request (void *req, call_frame_t *frame,
+ glusterfs_ctx_t *ctx,
+ rpc_clnt_prog_t *prog, int procnum,
+ fop_cbk_fn_t cbkfn, xdrproc_t xdrproc);
+
+int
+svs_get_snapshot_list (xlator_t *this);
+
+int
+mgmt_get_snapinfo_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe);
+
+char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
+ [GF_HNDSK_NULL] = "NULL",
+ [GF_HNDSK_SETVOLUME] = "SETVOLUME",
+ [GF_HNDSK_GETSPEC] = "GETSPEC",
+ [GF_HNDSK_PING] = "PING",
+ [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
+};
+
+rpc_clnt_prog_t svs_clnt_handshake_prog = {
+ .progname = "GlusterFS Handshake",
+ .prognum = GLUSTER_HNDSK_PROGRAM,
+ .progver = GLUSTER_HNDSK_VERSION,
+ .procnames = clnt_handshake_procs,
+};
+
+
typedef enum {
SNAP_VIEW_ENTRY_POINT_INODE = 0,
SNAP_VIEW_VIRTUAL_INODE
@@ -67,15 +104,22 @@ struct svs_fd {
typedef struct svs_fd svs_fd_t;
struct snap_dirent {
- char name[NAME_MAX];
- char uuid[UUID_CANONICAL_FORM_LEN + 1];
+ char name[NAME_MAX];
+ char uuid[UUID_CANONICAL_FORM_LEN + 1];
+ char snap_volname[NAME_MAX];
glfs_t *fs;
};
typedef struct snap_dirent snap_dirent_t;
struct svs_private {
- snap_dirent_t *dirents;
- int num_snaps;
+ snap_dirent_t *dirents;
+ int num_snaps;
+ char *volname;
+ struct list_head snaplist;
+ pthread_mutex_t snaplist_lock;
+ uint32_t is_snaplist_done;
+ gf_timer_t *snap_timer;
+ pthread_attr_t thr_attr;
};
typedef struct svs_private svs_private_t;
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 7be1e9017bd..3cb62624a07 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -32,6 +32,7 @@ extern struct rpc_clnt_program gd_peer_prog;
extern struct rpc_clnt_program gd_mgmt_prog;
extern struct rpc_clnt_program gd_mgmt_v3_prog;
+
#define TRUSTED_PREFIX "trusted-"
typedef ssize_t (*gfs_serialize_t) (struct iovec outmsg, void *data);
@@ -1046,8 +1047,8 @@ __server_get_volume_info (rpcsvc_request_t *req)
{
int ret = -1;
int32_t op_errno = ENOENT;
- gf_get_volume_info_req vol_info_req = {{0,}};
- gf_get_volume_info_rsp vol_info_rsp = {0,};
+ gf_get_volume_info_req vol_info_req = {{0,}};
+ gf_get_volume_info_rsp vol_info_rsp = {0,};
char *volname = NULL;
glusterd_volinfo_t *volinfo = NULL;
dict_t *dict = NULL;
@@ -1058,7 +1059,7 @@ __server_get_volume_info (rpcsvc_request_t *req)
ret = xdr_to_generic (req->msg[0], &vol_info_req,
(xdrproc_t)xdr_gf_get_volume_info_req);
if (ret < 0) {
- //failed to decode msg;
+ /* failed to decode msg */
req->rpc_err = GARBAGE_ARGS;
goto out;
}
@@ -1098,7 +1099,7 @@ __server_get_volume_info (rpcsvc_request_t *req)
}
if (!flags) {
- //Nothing to query about. Just return success
+ /* Nothing to query about. Just return success */
gf_log (THIS->name, GF_LOG_ERROR, "No flags set");
ret = 0;
goto out;
@@ -1177,11 +1178,116 @@ server_get_volume_info (rpcsvc_request_t *req)
__server_get_volume_info);
}
+
+/*
+ * glusterd function to get the list of snapshot names and uuids
+ */
+int
+__server_get_snap_info (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ int op_errno = ENOENT;
+ gf_getsnap_name_uuid_req snap_info_req = {{0,}};
+ gf_getsnap_name_uuid_rsp snap_info_rsp = {0,};
+ dict_t *dict = NULL;
+ dict_t *dict_rsp = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *volname = NULL;
+
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &snap_info_req,
+ (xdrproc_t)xdr_gf_getsnap_name_uuid_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Failed to decode management handshake response");
+ goto out;
+ }
+
+ if (snap_info_req.dict.dict_len) {
+ dict = dict_new ();
+ if (!dict) {
+ op_errno = ENOMEM;
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (snap_info_req.dict.dict_val,
+ snap_info_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Failed to unserialize dictionary");
+ op_errno = EINVAL;
+ ret = -1;
+ goto out;
+ } else {
+ dict->extra_stdfree = snap_info_req.dict.dict_val;
+ }
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ op_errno = EINVAL;
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Failed to retrieve volname");
+ ret = -1;
+ goto out;
+ }
+
+ dict_rsp = dict_new ();
+ if (!dict_rsp) {
+ op_errno = ENOMEM;
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_snapshot_get_volnames_uuids (dict_rsp, volname,
+ &snap_info_rsp);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Error getting snapshot volume names and uuids : %s",
+ volname);
+ op_errno = EINVAL;
+ }
+
+out:
+ snap_info_rsp.op_ret = ret;
+ snap_info_rsp.op_errno = op_errno;
+ snap_info_rsp.op_errstr = "";
+ glusterd_submit_reply (req, &snap_info_rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_getsnap_name_uuid_rsp);
+
+ if (dict) {
+ dict_unref (dict);
+ }
+
+ if (dict_rsp) {
+ dict_unref (dict_rsp);
+ }
+
+ if (snap_info_rsp.dict.dict_val) {
+ GF_FREE (snap_info_rsp.dict.dict_val);
+ }
+
+ return 0;
+}
+
+int
+server_get_snap_info (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __server_get_snap_info);
+}
+
rpcsvc_actor_t gluster_handshake_actors[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_NULL] = {"NULL", GF_HNDSK_NULL, NULL, NULL, 0, DRC_NA},
[GF_HNDSK_GETSPEC] = {"GETSPEC", GF_HNDSK_GETSPEC, server_getspec, NULL, 0, DRC_NA},
[GF_HNDSK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_HNDSK_EVENT_NOTIFY, server_event_notify, NULL, 0, DRC_NA},
[GF_HNDSK_GET_VOLUME_INFO] = {"GETVOLUMEINFO", GF_HNDSK_GET_VOLUME_INFO, server_get_volume_info, NULL, 0, DRC_NA},
+ [GF_HNDSK_GET_SNAPSHOT_INFO] = {"GETSNAPINFO", GF_HNDSK_GET_SNAPSHOT_INFO, server_get_snap_info, NULL, 0, DRC_NA},
};
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 117004b4452..cca94f17133 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -55,6 +55,8 @@
#include "glusterd-mgmt.h"
#include "glusterd-syncop.h"
+#include "glusterfs3.h"
+
#include "syscall.h"
#include "cli1-xdr.h"
#include "xdr-generic.h"
@@ -7712,3 +7714,97 @@ out:
return ret;
}
+
+
+
+int
+glusterd_snapshot_get_volnames_uuids (dict_t *dict,
+ char *volname,
+ gf_getsnap_name_uuid_rsp *snap_info_rsp)
+{
+ int ret = -1;
+ int snapcount = 0;
+ char key[PATH_MAX] = {0,};
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *tmp_vol = NULL;
+ xlator_t *this = NULL;
+ int op_errno = 0;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (volname);
+ GF_VALIDATE_OR_GOTO_WITH_ERROR (this->name, dict, out,
+ op_errno, EINVAL);
+ GF_VALIDATE_OR_GOTO_WITH_ERROR (this->name, volname, out,
+ op_errno, EINVAL);
+ GF_VALIDATE_OR_GOTO_WITH_ERROR (this->name, snap_info_rsp, out,
+ op_errno, EINVAL);
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get volinfo of volume %s",
+ volname);
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry_safe (snap_vol, tmp_vol, &volinfo->snap_volumes,
+ snapvol_list) {
+ snapcount++;
+
+ /* Set Snap Name */
+ snprintf (key, sizeof (key), "snapname.%d", snapcount);
+ ret = dict_set_dynstr_with_alloc (dict, key,
+ snap_vol->snapshot->snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "snap name in dictionary");
+ goto out;
+ }
+
+ /* Set Snap ID */
+ snprintf (key, sizeof (key), "snap-id.%d", snapcount);
+ ret = dict_set_dynstr_with_alloc (dict, key,
+ uuid_utoa(snap_vol->snapshot->snap_id));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "snap id in dictionary");
+ goto out;
+ }
+
+ /* Snap Volname which is used to activate the snap vol */
+ snprintf (key, sizeof (key), "snap-volname.%d", snapcount);
+ ret = dict_set_dynstr_with_alloc (dict, key, snap_vol->volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "snap id in dictionary");
+ goto out;
+ }
+ }
+
+ ret = dict_set_int32 (dict, "snap-count", snapcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snapcount");
+ op_errno = -ret;
+ goto out;
+ }
+
+ ret = dict_allocate_and_serialize (dict, &snap_info_rsp->dict.dict_val,
+ &snap_info_rsp->dict.dict_len);
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ snap_info_rsp->op_ret = ret;
+ snap_info_rsp->op_errno = op_errno;
+ snap_info_rsp->op_errstr = "";
+
+ return ret;
+}
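The dictionary serialized here is what mgmt_get_snapinfo_cbk unpacks on the snapview-server side; note that the indices are 1-based. For a volume with two snapshots the payload would look roughly like this (values purely illustrative):

    snap-count      = 2
    snapname.1      = "hourly-0"                 /* snapshot name */
    snap-id.1       = "<uuid of snapshot 1>"
    snap-volname.1  = "<snap volume name, used to activate the snap>"
    snapname.2      = "hourly-1"
    snap-id.2       = "<uuid of snapshot 2>"
    snap-volname.2  = "<snap volume name of snapshot 2>"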
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 1764f642d3f..aae91cdff8d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -28,6 +28,8 @@
#include "rpc-clnt.h"
#include "protocol-common.h"
+#include "glusterfs3-xdr.h"
+
#define GLUSTERD_SOCK_DIR "/var/run"
#define GLUSTERD_ASSIGN_BRICKID_TO_BRICKINFO(brickinfo, volinfo, brickid) do {\
sprintf (brickinfo->brick_id, "%s-client-%d",\
@@ -902,6 +904,11 @@ glusterd_is_snap_soft_limit_reached (glusterd_volinfo_t *volinfo,
int32_t
glusterd_find_brick_mount_path (char *brick_path, int32_t brick_count,
char **brick_mount_path);
+/*
+ * Function to retrieve list of snap volnames and their uuids
+ */
+int glusterd_snapshot_get_volnames_uuids (dict_t *dict,
+ char *volname, gf_getsnap_name_uuid_rsp *snap_info_rsp);
int
glusterd_update_fstype (char *orig_brick_path,