author    Anand Subramanian <anands@redhat.com>    2014-05-15 08:19:14 +0530
committer Vijay Bellur <vbellur@redhat.com>        2014-06-15 10:33:23 -0700
commit    ec845d93e74644bbfe27e0b085a38dbd7c02480f (patch)
tree      e4e455e9f9d6f75a3426fcad4a6360c885890902 /xlators/features/snapview-server
parent    056b99bfbc181a88579b8139d444f7dd2fa3b19e (diff)
Get snapshot info dynamically via new rpc and infra for snapview-server to refresh snaplist
BUG: 1105439
Change-Id: I4bb312a53d88f6f4955e69a3ef2b4955ec17f26d
Signed-off-by: Anand Subramanian <anands@redhat.com>
Reviewed-on: http://review.gluster.org/8001
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
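In short, the patch replaces the old file-scraping approach with a GF_HNDSK_GET_SNAPSHOT_INFO RPC to glusterd plus a worker thread that periodically re-arms a refresh timer and waits for the RPC callback to rebuild the in-memory snapshot list. Below is a small, self-contained pthread sketch of just that worker/callback handshake; the names (refresh_snaplist, worker) and the synchronous stub standing in for the RPC are illustrative, not code from this patch:

/* Hypothetical standalone model of the handshake between snaplist_worker()
 * and mgmt_get_snapinfo_cbk(): the worker arms a refresh, then sleeps on a
 * condition variable until the callback flags completion and signals it. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t mtx    = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv     = PTHREAD_COND_INITIALIZER;
static bool            resume = false;

/* Stand-in for the timer callback plus RPC round-trip that ends in
 * mgmt_get_snapinfo_cbk(); here it just sleeps and signals. */
static void *refresh_snaplist (void *arg)
{
        (void) arg;
        sleep (1);                       /* pretend glusterd answered the RPC */
        pthread_mutex_lock (&mtx);
        resume = true;                   /* "snaplist refreshed, worker may re-arm" */
        pthread_cond_signal (&cv);
        pthread_mutex_unlock (&mtx);
        return NULL;
}

/* Stand-in for snaplist_worker(): re-arm the refresh, then wait for it. */
static void *worker (void *arg)
{
        (void) arg;
        for (int cycle = 0; cycle < 3; cycle++) {
                pthread_t r;
                pthread_create (&r, NULL, refresh_snaplist, NULL);
                pthread_detach (r);

                pthread_mutex_lock (&mtx);
                while (!resume)
                        pthread_cond_wait (&cv, &mtx);
                resume = false;          /* consume the wakeup */
                pthread_mutex_unlock (&mtx);
                printf ("refresh cycle %d done\n", cycle);
        }
        return NULL;
}

int main (void)
{
        pthread_t w;
        pthread_create (&w, NULL, worker, NULL);
        pthread_join (w, NULL);
        return 0;
}

The real patch re-arms a 300-second refresh with gf_timer_call_after() and leaves the RPC asynchronous; the sketch only models the resume flag and condition-variable wait.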
Diffstat (limited to 'xlators/features/snapview-server')
-rw-r--r--  xlators/features/snapview-server/src/snapview-server-mem-types.h |   1
-rw-r--r--  xlators/features/snapview-server/src/snapview-server.c            | 627
-rw-r--r--  xlators/features/snapview-server/src/snapview-server.h            |  62
3 files changed, 599 insertions, 91 deletions
diff --git a/xlators/features/snapview-server/src/snapview-server-mem-types.h b/xlators/features/snapview-server/src/snapview-server-mem-types.h
index 6820b5e..a803516 100644
--- a/xlators/features/snapview-server/src/snapview-server-mem-types.h
+++ b/xlators/features/snapview-server/src/snapview-server-mem-types.h
@@ -18,6 +18,7 @@ enum snapview_mem_types {
gf_svs_mt_svs_inode_t,
gf_svs_mt_dirents_t,
gf_svs_mt_svs_fd_t,
+ gf_svs_mt_snaplist_t,
gf_svs_mt_end
};
diff --git a/xlators/features/snapview-server/src/snapview-server.c b/xlators/features/snapview-server/src/snapview-server.c
index 188aff1..833ffc9 100644
--- a/xlators/features/snapview-server/src/snapview-server.c
+++ b/xlators/features/snapview-server/src/snapview-server.c
@@ -15,9 +15,447 @@
#include "snapview-server.h"
#include "snapview-server-mem-types.h"
-/*
- * Helper functions
- */
+#include "xlator.h"
+#include "rpc-clnt.h"
+#include "xdr-generic.h"
+#include "protocol-common.h"
+#include <pthread.h>
+
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t condvar = PTHREAD_COND_INITIALIZER;
+static gf_boolean_t snap_worker_resume;
+
+void
+snaplist_refresh (void *data)
+{
+ xlator_t *this = NULL;
+ int ret = 0;
+ svs_private_t *priv = NULL;
+
+ this = data;
+ priv = this->private;
+
+ ret = svs_get_snapshot_list (this);
+ if (ret) {
+ gf_log ("snapview-server", GF_LOG_WARNING,
+ "Error retrieving refreshed snapshot list");
+ }
+
+ return;
+}
+
+void *
+snaplist_worker (void *data)
+{
+ xlator_t *this = NULL;
+ int ret = 0;
+ struct timespec timeout = {0, };
+ svs_private_t *priv = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+
+ this = data;
+ priv = this->private;
+ ctx = this->ctx;
+ GF_ASSERT (ctx);
+
+ pthread_mutex_lock (&priv->snaplist_lock);
+ priv->is_snaplist_done = 1;
+ pthread_mutex_unlock (&priv->snaplist_lock);
+
+ while (1) {
+ timeout.tv_sec = 300;
+ timeout.tv_nsec = 0;
+ priv->snap_timer = gf_timer_call_after (ctx, timeout,
+ snaplist_refresh,
+ data);
+
+ pthread_mutex_lock (&mutex);
+ while (!snap_worker_resume) {
+ pthread_cond_wait (&condvar, &mutex);
+ }
+ snap_worker_resume = _gf_false;
+ pthread_mutex_unlock (&mutex);
+ }
+
+ return NULL;
+}
+
+int
+svs_mgmt_submit_request (void *req, call_frame_t *frame,
+ glusterfs_ctx_t *ctx,
+ rpc_clnt_prog_t *prog, int procnum,
+ fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
+{
+ int ret = -1;
+ int count = 0;
+ struct iovec iov = {0, };
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ ssize_t xdr_size = 0;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", frame, out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", req, out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", ctx, out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", prog, out);
+
+ GF_ASSERT (frame->this);
+
+ iobref = iobref_new ();
+ if (!iobref) {
+ goto out;
+ }
+
+ if (req) {
+ xdr_size = xdr_sizeof (xdrproc, req);
+
+ iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size);
+ if (!iobuf) {
+ goto out;
+ }
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_pagesize (iobuf);
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic (iov, req, xdrproc);
+ if (ret == -1) {
+ gf_log (frame->this->name, GF_LOG_WARNING,
+ "Failed to create XDR payload");
+ goto out;
+ }
+ iov.iov_len = ret;
+ count = 1;
+ }
+
+ ret = rpc_clnt_submit (ctx->mgmt, prog, procnum, cbkfn,
+ &iov, count,
+ NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
+
+out:
+ if (iobref)
+ iobref_unref (iobref);
+
+ if (iobuf)
+ iobuf_unref (iobuf);
+ return ret;
+}
+
+
+int mgmt_get_snapinfo_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf_getsnap_name_uuid_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0;
+ dict_t *dict = NULL;
+ char key[1024] = {0};
+ int snapcount = 0;
+ svs_private_t *priv = NULL;
+ xlator_t *this = NULL;
+ int i = 0;
+ int j = 0;
+ char *value = NULL;
+ snap_dirent_t *dirents = NULL;
+ snap_dirent_t *old_dirents = NULL;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", req, error_out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", myframe, error_out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", iov, error_out);
+
+ frame = myframe;
+ this = frame->this;
+ ctx = frame->this->ctx;
+ priv = this->private;
+ old_dirents = priv->dirents;
+
+ if (!ctx) {
+ gf_log (frame->this->name, GF_LOG_ERROR, "NULL context");
+ errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
+
+ if (-1 == req->rpc_status) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "RPC call is not successful");
+ errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gf_getsnap_name_uuid_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response, rsp.op_ret = %d",
+ rsp.op_ret);
+ goto out;
+ }
+
+ if (rsp.op_ret == -1) {
+ errno = rsp.op_errno;
+ ret = -1;
+ goto out;
+ }
+
+ if (!rsp.dict.dict_len) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Response dict is not populated");
+ ret = -1;
+ errno = EINVAL;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to unserialize dictionary");
+ errno = EINVAL;
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "snap-count", (int32_t*)&snapcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snapcount");
+ errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
+
+ pthread_mutex_lock (&priv->snaplist_lock);
+
+ if ((priv->num_snaps == 0) &&
+ (snapcount != 0)) {
+ /* first time we are fetching snap list */
+ dirents = GF_CALLOC (snapcount, sizeof (snap_dirent_t),
+ gf_svs_mt_dirents_t);
+ if (!dirents) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Unable to allocate memory");
+ errno = ENOMEM;
+ ret = -1;
+ goto unlock;
+ }
+ } else {
+ /* fetch snaplist dynamically at run-time */
+ dirents = GF_CALLOC (snapcount, sizeof (snap_dirent_t),
+ gf_svs_mt_dirents_t);
+ if (!dirents) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Unable to allocate memory");
+ errno = ENOMEM;
+ ret = -1;
+ goto unlock;
+ }
+ }
+
+ for (i = 0; i < snapcount; i++) {
+ snprintf (key, sizeof (key), "snap-volname.%d", i+1);
+ ret = dict_get_str (dict, key, &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snap volname %d", i+1);
+ errno = EINVAL;
+ ret = -1;
+ goto unlock;
+ }
+ strncpy (dirents[i].snap_volname, value,
+ sizeof (dirents[i].snap_volname));
+
+ snprintf (key, sizeof (key), "snap-id.%d", i+1);
+ ret = dict_get_str (dict, key, &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snap uuid %d", i+1);
+ errno = EINVAL;
+ ret = -1;
+ goto unlock;
+ }
+ strncpy (dirents[i].uuid, value, sizeof (dirents[i].uuid));
+
+ snprintf (key, sizeof (key), "snapname.%d", i+1);
+ ret = dict_get_str (dict, key, &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snap name %d", i+1);
+ errno = EINVAL;
+ ret = -1;
+ goto unlock;
+ }
+ strncpy (dirents[i].name, value, sizeof (dirents[i].name));
+ }
+
+ /*
+ * Got the new snap list populated in dirents
+ * The new snap list is either a subset or a superset of
+ * the existing snaplist old_dirents which has priv->num_snaps
+ * number of entries.
+ *
+ * If subset, then clean up the fs for entries which are
+ * no longer relevant.
+ *
+ * For other overlapping entries set the fs for new dirents
+ * entries which have a fs assigned already in old_dirents
+ *
+ * We do this as we don't want to do new glfs_init()s repeatedly
+ * as the dirents entries for snapshot volumes get repeatedly
+ * cleaned up and allocated. And if we don't, that will lead
+ * to memory leaks.
+ */
+ for (i = 0; i < priv->num_snaps; i++) {
+ for (j = 0; j < snapcount; j++) {
+ if ((!strcmp (old_dirents[i].name,
+ dirents[j].name)) &&
+ (!strcmp (old_dirents[i].uuid,
+ dirents[j].uuid))) {
+ dirents[j].fs = old_dirents[i].fs;
+ old_dirents[i].fs = NULL;
+ break;
+ }
+ }
+ }
+
+ if (old_dirents) {
+ for (i=0; i < priv->num_snaps; i++) {
+ if (old_dirents[i].fs)
+ glfs_fini (old_dirents[i].fs);
+ }
+ }
+
+ priv->dirents = dirents;
+ priv->num_snaps = snapcount;
+
+ GF_FREE (old_dirents);
+
+ ret = 0;
+
+unlock:
+ /*
+ *
+ * We will unlock the snaplist_lock here for two reasons:
+ * 1. We ideally would like to avoid nested locks
+ * 2. The snaplist_lock and the mutex protecting the condvar
+ * are independent of each other and don't need to be
+ * mixed together
+ */
+ pthread_mutex_unlock (&priv->snaplist_lock);
+
+out:
+ pthread_mutex_lock (&mutex);
+ snap_worker_resume = _gf_true;
+ if (priv->is_snaplist_done) {
+ /*
+ * No need to signal if it is the first time
+ * refresh of the snaplist as no thread is
+ * waiting on this. It is only when the snaplist_worker
+ * is started that we have a thread waiting on this
+ */
+ pthread_cond_signal (&condvar);
+ }
+ pthread_mutex_unlock (&mutex);
+
+ if (dict) {
+ dict_unref (dict);
+ }
+ free (rsp.dict.dict_val);
+ free (rsp.op_errstr);
+
+ if (myframe)
+ SVS_STACK_DESTROY (myframe);
+
+error_out:
+ return ret;
+}
+
+int
+svs_get_snapshot_list (xlator_t *this)
+{
+ gf_getsnap_name_uuid_req req = {{0,}};
+ int ret = 0;
+ dict_t *dict = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ call_frame_t *frame = NULL;
+ svs_private_t *priv = NULL;
+
+ ctx = this->ctx;
+ if (!ctx) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "ctx is NULL");
+ ret = -1;
+ goto out;
+ }
+
+ frame = create_frame (this, ctx->pool);
+ if (!frame) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error allocating frame");
+ ret = -1;
+ goto out;
+ }
+
+ priv = this->private;
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto frame_destroy;
+ }
+
+ ret = dict_set_str (dict, "volname", priv->volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error setting volname in dict");
+ goto frame_destroy;
+ }
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to serialize dictionary");
+ ret = -1;
+ goto frame_destroy;
+ }
+
+ ret = svs_mgmt_submit_request (&req, frame, ctx,
+ &svs_clnt_handshake_prog,
+ GF_HNDSK_GET_SNAPSHOT_INFO,
+ mgmt_get_snapinfo_cbk,
+ (xdrproc_t)xdr_gf_getsnap_name_uuid_req);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error sending snapshot names RPC request");
+ goto frame_destroy;
+ }
+
+out:
+ if (dict) {
+ dict_unref (dict);
+ }
+ GF_FREE (req.dict.dict_val);
+
+ return ret;
+
+frame_destroy:
+ /*
+ * Destroy the frame if we encountered an error
+ * Else we need to clean it up in
+ * mgmt_get_snapinfo_cbk
+ */
+ SVS_STACK_DESTROY (frame);
+ goto out;
+}
int
__svs_inode_ctx_set (xlator_t *this, inode_t *inode, svs_inode_t *svs_inode)
@@ -411,7 +849,13 @@ svs_get_snap_dirent (xlator_t *this, const char *name)
private = this->private;
+ pthread_mutex_lock (&private->snaplist_lock);
+
dirents = private->dirents;
+ if (!dirents) {
+ pthread_mutex_unlock (&private->snaplist_lock);
+ goto out;
+ }
tmp_dirent = dirents;
for (i = 0; i < private->num_snaps; i++) {
@@ -422,6 +866,8 @@ svs_get_snap_dirent (xlator_t *this, const char *name)
tmp_dirent++;
}
+ pthread_mutex_unlock (&private->snaplist_lock);
+
out:
return dirent;
}
@@ -457,7 +903,7 @@ svs_initialise_snapshot_volume (xlator_t *this, const char *name)
}
snprintf (volname, sizeof (volname), "/snaps/%s/%s",
- dirent->name, dirent->uuid);
+ dirent->name, dirent->snap_volname);
fs = glfs_new (volname);
if (!fs) {
@@ -484,7 +930,8 @@ svs_initialise_snapshot_volume (xlator_t *this, const char *name)
}
snprintf (logfile, sizeof (logfile),
- DEFAULT_SVD_LOG_FILE_DIRECTORY "/%s-%s.log", name, dirent->uuid);
+ DEFAULT_SVD_LOG_FILE_DIRECTORY "/%s-%s.log",
+ name, dirent->uuid);
ret = glfs_set_logging(fs, logfile, loglevel);
if (ret) {
@@ -513,19 +960,21 @@ svs_get_latest_snap_entry (xlator_t *this)
svs_private_t *priv = NULL;
snap_dirent_t *dirents = NULL;
snap_dirent_t *dirent = NULL;
- snap_dirent_t *tmp_dirent = NULL;
GF_VALIDATE_OR_GOTO ("svs", this, out);
priv = this->private;
+ pthread_mutex_lock (&priv->snaplist_lock);
dirents = priv->dirents;
-
- if (priv->num_snaps && dirents) {
- tmp_dirent = &dirents[priv->num_snaps - 1];
- dirent = tmp_dirent;
+ if (!dirents) {
+ pthread_mutex_unlock (&priv->snaplist_lock);
+ goto out;
}
+ if (priv->num_snaps)
+ dirent = &dirents[priv->num_snaps - 1];
+ pthread_mutex_unlock (&priv->snaplist_lock);
out:
return dirent;
}
@@ -535,13 +984,18 @@ svs_get_latest_snapshot (xlator_t *this)
{
glfs_t *fs = NULL;
snap_dirent_t *dirent = NULL;
+ svs_private_t *priv = NULL;
GF_VALIDATE_OR_GOTO ("svs", this, out);
+ priv = this->private;
dirent = svs_get_latest_snap_entry (this);
- if (dirent)
+ if (dirent) {
+ pthread_mutex_lock (&priv->snaplist_lock);
fs = dirent->fs;
+ pthread_mutex_unlock (&priv->snaplist_lock);
+ }
out:
return fs;
@@ -991,7 +1445,7 @@ svs_lookup (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
if (loc->name && strlen (loc->name)) {
ret = dict_get_str_boolean (xdata, "entry-point", _gf_false);
if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR, "failed to get the "
+ gf_log (this->name, GF_LOG_DEBUG, "failed to get the "
"entry point info");
entry_point = _gf_false;
} else {
@@ -1013,8 +1467,10 @@ svs_lookup (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
/* Initialize latest snapshot, which is used for nameless lookups */
dirent = svs_get_latest_snap_entry (this);
- if (dirent && !dirent->fs)
+
+ if (dirent && !dirent->fs) {
fs = svs_initialise_snapshot_volume (this, dirent->name);
+ }
/* lookup is on the entry point to the snapshot world */
if (entry_point) {
@@ -1459,68 +1915,6 @@ out:
return 0;
}
-/* As of now, the list of snapshots is obtained by reading a predefined file
- (which the user has to generate using these commands)
-* /usr/local/sbin/gluster snapshot info | grep -i snap | grep -i Volume |
- grep -i Name | cut -d':' -f 2 > /tmp/tmp-snap-uuids
- /usr/local/sbin/gluster snapshot info | grep Snapshot | cut -d':' -f 2
- > /tmp/tmp-snap_names
- This is a temporary workaround which will be changed to a notification
- based mechanism where snapd gets the list of snapshots from glusterd
-*/
-int
-svs_get_snapshot_list (xlator_t *this, svs_private_t *priv)
-{
- int ret = -1;
- char str_uuid[256] = {'\0'};
- char str_name[256] = {'\0'};
- int snap_count = 0;
- snap_dirent_t *dirents = NULL;
- FILE *fpn = NULL;
- FILE *fpu = NULL;
- int i = 0;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
-
- dirents = GF_CALLOC (sizeof (*dirents), SNAP_VIEW_MAX_NUM_SNAPS,
- gf_svs_mt_dirents_t);
- if (!dirents) {
- gf_log (this->name, GF_LOG_ERROR, "failed to allocate memory");
- goto out;
- /* error, bail */
- }
- priv->dirents = dirents;
-
- fpu = fopen ("/tmp/tmp-snap-uuids", "r+");
- fpn = fopen ("/tmp/tmp-snap_names", "r+");
-
- if (!fpu || !fpn) {
- gf_log (this->name, GF_LOG_ERROR, "failed to open the file");
- goto out;
- }
-
- while ((fscanf (fpu, "%s", str_uuid) != -1) &&
- (fscanf (fpn, "%s", str_name) != -1)) {
- strncpy (dirents[i].uuid, str_uuid, strlen (str_uuid) + 1);
- strncpy (dirents[i].name, str_name, strlen (str_name) + 1);
- ++snap_count;
- ++i;
- }
- priv->num_snaps = snap_count;
-
- fclose (fpn);
- fclose (fpu);
-
- ret = 0;
-
-out:
- if (ret)
- GF_FREE (dirents);
-
- return ret;
-}
-
int
svs_fill_readdir (xlator_t *this, gf_dirent_t *entries, size_t size, off_t off)
{
@@ -1536,20 +1930,23 @@ svs_fill_readdir (xlator_t *this, gf_dirent_t *entries, size_t size, off_t off)
GF_VALIDATE_OR_GOTO ("snap-view-daemon", entries, out);
priv = this->private;
+ GF_ASSERT (priv);
+
/* create the dir entries */
+ pthread_mutex_lock (&priv->snaplist_lock);
dirents = priv->dirents;
for (i = off; i < priv->num_snaps; ) {
this_size = sizeof (gf_dirent_t) +
strlen (dirents[i].name) + 1;
if (this_size + filled_size > size )
- goto out;
+ goto unlock;
entry = gf_dirent_for_name (dirents[i].name);
if (!entry) {
gf_log (this->name, GF_LOG_ERROR, "failed to allocate "
"dentry for %s", dirents[i].name);
- goto out;
+ goto unlock;
}
entry->d_off = i + 1;
@@ -1561,6 +1958,9 @@ svs_fill_readdir (xlator_t *this, gf_dirent_t *entries, size_t size, off_t off)
filled_size += this_size;
}
+unlock:
+ pthread_mutex_unlock (&priv->snaplist_lock);
+
out:
return count;
}
@@ -2347,8 +2747,9 @@ mem_acct_init (xlator_t *this)
int32_t
init (xlator_t *this)
{
- svs_private_t *priv = NULL;
- int ret = -1;
+ svs_private_t *priv = NULL;
+ int ret = -1;
+ pthread_t snap_thread;
/* This can be the top of graph in certain cases */
if (!this->parents) {
@@ -2356,19 +2757,47 @@ init (xlator_t *this)
"dangling volume. check volfile ");
}
- /* TODO: define a mem-type structure */
priv = GF_CALLOC (1, sizeof (*priv), gf_svs_mt_priv_t);
if (!priv)
goto out;
this->private = priv;
+ GF_OPTION_INIT ("volname", priv->volname, str, out);
+ pthread_mutex_init (&(priv->snaplist_lock), NULL);
+ priv->is_snaplist_done = 0;
+ priv->num_snaps = 0;
+ snap_worker_resume = _gf_false;
+
/* get the list of snaps first to return to client xlator */
- ret = svs_get_snapshot_list (this, priv);
+ ret = svs_get_snapshot_list (this);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error initializing snaplist infrastructure");
+ ret = -1;
+ goto out;
+ }
+
+ if ((ret = pthread_attr_init (&priv->thr_attr)) != 0) {
+ gf_log (this->name, GF_LOG_ERROR, "pthread attr init failed");
+ goto out;
+ }
+
+ ret = gf_thread_create (&snap_thread,
+ &priv->thr_attr,
+ snaplist_worker,
+ this);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to create snaplist worker thread");
+ goto out;
+ }
ret = 0;
+
out:
if (ret && priv) {
+ GF_FREE (priv->dirents);
GF_FREE (priv);
}
@@ -2378,14 +2807,48 @@ out:
void
fini (xlator_t *this)
{
- svs_private_t *priv = NULL;
+ svs_private_t *priv = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0;
+
+ GF_ASSERT (this);
priv = this->private;
this->private = NULL;
+ ctx = this->ctx;
+ if (!ctx)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Invalid ctx found");
if (priv) {
+ gf_timer_call_cancel (ctx, priv->snap_timer);
+ priv->snap_timer = NULL;
+ ret = pthread_mutex_destroy (&priv->snaplist_lock);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Could not destroy mutex snaplist_lock");
+ }
+ ret = pthread_attr_destroy (&priv->thr_attr);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Could not destroy pthread attr");
+ }
+ if (priv->dirents) {
+ GF_FREE (priv->dirents);
+ }
GF_FREE (priv);
}
+ ret = pthread_mutex_destroy (&mutex);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Could not destroy mutex");
+ }
+ ret = pthread_cond_destroy (&condvar);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Could not destroy condition variable");
+ }
+
return;
}
diff --git a/xlators/features/snapview-server/src/snapview-server.h b/xlators/features/snapview-server/src/snapview-server.h
index b0e051e..510599c 100644
--- a/xlators/features/snapview-server/src/snapview-server.h
+++ b/xlators/features/snapview-server/src/snapview-server.h
@@ -35,14 +35,51 @@
#include "glusterfs-acl.h"
#include "syncop.h"
#include "list.h"
-
-/*
- * The max number of snap entries we consider currently
- */
-#define SNAP_VIEW_MAX_NUM_SNAPS 128
+#include "timer.h"
+#include "rpc-clnt.h"
+#include "protocol-common.h"
#define DEFAULT_SVD_LOG_FILE_DIRECTORY DATADIR "/log/glusterfs"
+#define SNAP_VIEW_MAX_GLFS_T 256
+#define SNAP_VIEW_MAX_GLFS_FDS 1024
+#define SNAP_VIEW_MAX_GLFS_OBJ_HANDLES 1024
+
+#define SVS_STACK_DESTROY(_frame) \
+ do { \
+ ((call_frame_t *)_frame)->local = NULL; \
+ STACK_DESTROY (((call_frame_t *)_frame)->root); \
+ } while (0)
+
+int
+svs_mgmt_submit_request (void *req, call_frame_t *frame,
+ glusterfs_ctx_t *ctx,
+ rpc_clnt_prog_t *prog, int procnum,
+ fop_cbk_fn_t cbkfn, xdrproc_t xdrproc);
+
+int
+svs_get_snapshot_list (xlator_t *this);
+
+int
+mgmt_get_snapinfo_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe);
+
+char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
+ [GF_HNDSK_NULL] = "NULL",
+ [GF_HNDSK_SETVOLUME] = "SETVOLUME",
+ [GF_HNDSK_GETSPEC] = "GETSPEC",
+ [GF_HNDSK_PING] = "PING",
+ [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
+};
+
+rpc_clnt_prog_t svs_clnt_handshake_prog = {
+ .progname = "GlusterFS Handshake",
+ .prognum = GLUSTER_HNDSK_PROGRAM,
+ .progver = GLUSTER_HNDSK_VERSION,
+ .procnames = clnt_handshake_procs,
+};
+
+
typedef enum {
SNAP_VIEW_ENTRY_POINT_INODE = 0,
SNAP_VIEW_VIRTUAL_INODE
@@ -67,15 +104,22 @@ struct svs_fd {
typedef struct svs_fd svs_fd_t;
struct snap_dirent {
- char name[NAME_MAX];
- char uuid[UUID_CANONICAL_FORM_LEN + 1];
+ char name[NAME_MAX];
+ char uuid[UUID_CANONICAL_FORM_LEN + 1];
+ char snap_volname[NAME_MAX];
glfs_t *fs;
};
typedef struct snap_dirent snap_dirent_t;
struct svs_private {
- snap_dirent_t *dirents;
- int num_snaps;
+ snap_dirent_t *dirents;
+ int num_snaps;
+ char *volname;
+ struct list_head snaplist;
+ pthread_mutex_t snaplist_lock;
+ uint32_t is_snaplist_done;
+ gf_timer_t *snap_timer;
+ pthread_attr_t thr_attr;
};
typedef struct svs_private svs_private_t;