-rw-r--r--  doc/admin-guide/en-US/markdown/admin_managing_snapshots.md     |   47
-rw-r--r--  rpc/rpc-lib/src/protocol-common.h                              |    1
-rw-r--r--  tests/basic/uss.t                                              |   16
-rw-r--r--  xlators/features/snapview-client/src/snapview-client.c         |   18
-rw-r--r--  xlators/features/snapview-server/src/Makefile.am               |    2
-rw-r--r--  xlators/features/snapview-server/src/snapview-server-helpers.c |  567
-rw-r--r--  xlators/features/snapview-server/src/snapview-server-mgmt.c    |  480
-rw-r--r--  xlators/features/snapview-server/src/snapview-server.c         | 1155
-rw-r--r--  xlators/features/snapview-server/src/snapview-server.h         |   81
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c                  |    4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c                           |   37
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h                           |    3
12 files changed, 1294 insertions(+), 1117 deletions(-)
diff --git a/doc/admin-guide/en-US/markdown/admin_managing_snapshots.md b/doc/admin-guide/en-US/markdown/admin_managing_snapshots.md
index 4ae6e4e062b..40995e59f08 100644
--- a/doc/admin-guide/en-US/markdown/admin_managing_snapshots.md
+++ b/doc/admin-guide/en-US/markdown/admin_managing_snapshots.md
@@ -169,3 +169,50 @@ Details:
This command will de-activate the mentioned snapshot.
-------------------------------------------------------------------------
+
+**Accessing the snapshot**
+
+Snapshots can be accessed in two ways.
+
+1) Mounting the snapshot:
+
+The snapshot can be accessed via a FUSE mount (FUSE only). To do that it has
+to be mounted first. A snapshot can be mounted via FUSE with the command
+below:
+
+*mount -t glusterfs <hostname>:/snaps/<snap-name>/<volume-name> <mount-path>*
+
+i.e. say "host1" is one of the peers. Let "vol" be the volume name and "my-snap"
+be the snapshot name. In this case a snapshot can be mounted via this command
+
+*mount -t glusterfs host1:/snaps/my-snap/vol /mnt/snapshot*
+
+
+2) User serviceability:
+
+Apart from the above method of mounting the snapshot, a list of available
+snapshots and the contents of each snapshot can be viewed from any of the
+mount points accessing the glusterfs volume (FUSE, NFS, or SMB). For this,
+user serviceable snapshots have to be enabled for the volume first, using
+the command below:
+
+*gluster volume set <volname> features.uss enable*
+
+Once enabled, every directory (including the root of the filesystem) exposes
+an access point into the snapshot world. The access point is a hidden
+directory; changing into it takes the user into the snapshot world. By
+default the hidden directory is ".snaps". Once user serviceability is
+enabled, one can cd into .snaps from any directory. Running "ls" in that
+directory shows one entry per snapshot of the volume. Say there are 3
+snapshots ("snap1", "snap2", "snap3"); then ls in the .snaps directory shows
+those 3 names as directory entries. Each entry represents the state of the
+directory from which .snaps was entered, at that point in time.
+
+NOTE: Access to the snapshots is read-only.
+
+Also, the name of the hidden directory (the access point to the snapshot
+world) can be changed using the command below:
+
+*gluster volume set <volname> snapshot-directory <new-name>*
+
+--------------------------------------------------------------------------------------
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index b3e677afd17..8731a5d2254 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -130,6 +130,7 @@ enum gf_cbk_procnum {
GF_CBK_FETCHSPEC,
GF_CBK_INO_FLUSH,
GF_CBK_EVENT_NOTIFY,
+ GF_CBK_GET_SNAPS,
GF_CBK_MAXVALUE,
};
diff --git a/tests/basic/uss.t b/tests/basic/uss.t
index 8c6a8982eea..e59006d1cd8 100644
--- a/tests/basic/uss.t
+++ b/tests/basic/uss.t
@@ -243,4 +243,20 @@ TEST fd_close $fd1;
TEST fd_close $fd2;
TEST fd_close $fd3;
+#test 131
+TEST $CLI snapshot create snap5 $V0
+TEST ls $M0/.history;
+
+function count_snaps
+{
+ local mount_point=$1;
+ local num_snaps;
+
+ num_snaps=$(ls $mount_point/.history | wc -l);
+
+ echo $num_snaps;
+}
+
+EXPECT_WITHIN 30 "5" count_snaps $M0;
+
cleanup;
diff --git a/xlators/features/snapview-client/src/snapview-client.c b/xlators/features/snapview-client/src/snapview-client.c
index ad022101715..c9077351007 100644
--- a/xlators/features/snapview-client/src/snapview-client.c
+++ b/xlators/features/snapview-client/src/snapview-client.c
@@ -148,12 +148,14 @@ svc_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
So if lookup fails with ENOENT and the inode context is not there,
then send the lookup to the 2nd child of svc.
*/
- ret = svc_inode_ctx_get (this, inode, &inode_type);
if (op_ret) {
- if (op_errno == ENOENT && (ret < 0) &&
- !uuid_is_null (local->loc.gfid) &&
- !__is_root_gfid (local->loc.gfid)) {
- if (subvolume == FIRST_CHILD (this)) {
+ if (op_errno == ENOENT &&
+ !uuid_is_null (local->loc.gfid)) {
+ ret = svc_inode_ctx_get (this, inode, &inode_type);
+ if (ret < 0 && subvolume == FIRST_CHILD (this)) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Lookup on normal graph failed. "
+ "Sending lookup to snapview-server");
subvolume = SECOND_CHILD (this);
STACK_WIND (frame, svc_lookup_cbk, subvolume,
subvolume->fops->lookup,
@@ -162,8 +164,10 @@ svc_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
}
}
- gf_log (this->name, GF_LOG_WARNING,
- "Lookup on normal graph failed");
+ gf_log (this->name,
+ (op_errno == ENOENT)?GF_LOG_DEBUG:GF_LOG_ERROR,
+ "Lookup on normal graph failed with error %s",
+ strerror (op_errno));
goto out;
}
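The snapview-client hunk above narrows the fallback: the inode-context check now happens only inside the failure branch, so a lookup is re-wound to snapview-server only on ENOENT for a gfid-bearing lookup with no cached context. Below is a minimal standalone sketch of that decision; `svc_classify_lookup` and the result enum are illustrative names, not part of the patch, and STACK_WIND mechanics are elided.

```c
#include <errno.h>

/* Hypothetical helper modelling the revised fallback decision in
 * svc_lookup_cbk(). */
typedef enum { SVC_OK, SVC_RETRY_ON_SNAPD, SVC_FAIL } svc_result_t;

static svc_result_t
svc_classify_lookup (int op_ret, int op_errno, int have_gfid,
                     int have_inode_ctx, int on_first_child)
{
        if (op_ret == 0)
                return SVC_OK;             /* lookup succeeded */

        /* Re-wind to snapview-server only for ENOENT on a gfid-bearing
         * lookup with no cached inode context, when the failed lookup
         * went to the normal graph (the first child). */
        if (op_errno == ENOENT && have_gfid && !have_inode_ctx &&
            on_first_child)
                return SVC_RETRY_ON_SNAPD; /* wind to SECOND_CHILD */

        return SVC_FAIL;                   /* propagate op_errno */
}
```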
diff --git a/xlators/features/snapview-server/src/Makefile.am b/xlators/features/snapview-server/src/Makefile.am
index 0966ae4cc56..df58d7bef71 100644
--- a/xlators/features/snapview-server/src/Makefile.am
+++ b/xlators/features/snapview-server/src/Makefile.am
@@ -3,7 +3,7 @@ xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
snapview_server_la_LDFLAGS = -module -avoid-version
-snapview_server_la_SOURCES = snapview-server.c
+snapview_server_la_SOURCES = snapview-server.c snapview-server-mgmt.c snapview-server-helpers.c
snapview_server_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la\
$(top_builddir)/api/src/libgfapi.la\
$(RLLIBS) $(top_builddir)/rpc/xdr/src/libgfxdr.la \
diff --git a/xlators/features/snapview-server/src/snapview-server-helpers.c b/xlators/features/snapview-server/src/snapview-server-helpers.c
new file mode 100644
index 00000000000..0817e145e26
--- /dev/null
+++ b/xlators/features/snapview-server/src/snapview-server-helpers.c
@@ -0,0 +1,567 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "snapview-server.h"
+#include "snapview-server-mem-types.h"
+
+#include "xlator.h"
+#include "rpc-clnt.h"
+#include "xdr-generic.h"
+#include "protocol-common.h"
+#include <pthread.h>
+
+
+int
+__svs_inode_ctx_set (xlator_t *this, inode_t *inode, svs_inode_t *svs_inode)
+{
+ uint64_t value = 0;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, inode, out);
+ GF_VALIDATE_OR_GOTO (this->name, svs_inode, out);
+
+ value = (uint64_t)(long) svs_inode;
+
+ ret = __inode_ctx_set (inode, this, &value);
+
+out:
+ return ret;
+}
+
+svs_inode_t *
+__svs_inode_ctx_get (xlator_t *this, inode_t *inode)
+{
+ svs_inode_t *svs_inode = NULL;
+ uint64_t value = 0;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, inode, out);
+
+ ret = __inode_ctx_get (inode, this, &value);
+ if (ret)
+ goto out;
+
+ svs_inode = (svs_inode_t *) ((long) value);
+
+out:
+ return svs_inode;
+}
+
+svs_inode_t *
+svs_inode_ctx_get (xlator_t *this, inode_t *inode)
+{
+ svs_inode_t *svs_inode = NULL;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, inode, out);
+
+ LOCK (&inode->lock);
+ {
+ svs_inode = __svs_inode_ctx_get (this, inode);
+ }
+ UNLOCK (&inode->lock);
+
+out:
+ return svs_inode;
+}
+
+int32_t
+svs_inode_ctx_set (xlator_t *this, inode_t *inode, svs_inode_t *svs_inode)
+{
+ int32_t ret = -1;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, inode, out);
+ GF_VALIDATE_OR_GOTO (this->name, svs_inode, out);
+
+ LOCK (&inode->lock);
+ {
+ ret = __svs_inode_ctx_set (this, inode, svs_inode);
+ }
+ UNLOCK (&inode->lock);
+
+out:
+ return ret;
+}
+
+svs_inode_t *
+svs_inode_new ()
+{
+ svs_inode_t *svs_inode = NULL;
+
+ svs_inode = GF_CALLOC (1, sizeof (*svs_inode), gf_svs_mt_svs_inode_t);
+
+ return svs_inode;
+}
+
+svs_inode_t *
+svs_inode_ctx_get_or_new (xlator_t *this, inode_t *inode)
+{
+ svs_inode_t *svs_inode = NULL;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, inode, out);
+
+ LOCK (&inode->lock);
+ {
+ svs_inode = __svs_inode_ctx_get (this, inode);
+ if (!svs_inode) {
+ svs_inode = svs_inode_new ();
+ if (svs_inode) {
+ ret = __svs_inode_ctx_set (this, inode,
+ svs_inode);
+ if (ret) {
+ GF_FREE (svs_inode);
+ svs_inode = NULL;
+ }
+ }
+ }
+ }
+ UNLOCK (&inode->lock);
+
+out:
+ return svs_inode;
+}
+
+svs_fd_t *
+svs_fd_new ()
+{
+ svs_fd_t *svs_fd = NULL;
+
+ svs_fd = GF_CALLOC (1, sizeof (*svs_fd), gf_svs_mt_svs_fd_t);
+
+ return svs_fd;
+}
+
+int
+__svs_fd_ctx_set (xlator_t *this, fd_t *fd, svs_fd_t *svs_fd)
+{
+ uint64_t value = 0;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, fd, out);
+ GF_VALIDATE_OR_GOTO (this->name, svs_fd, out);
+
+ value = (uint64_t)(long) svs_fd;
+
+ ret = __fd_ctx_set (fd, this, value);
+
+out:
+ return ret;
+}
+
+svs_fd_t *
+__svs_fd_ctx_get (xlator_t *this, fd_t *fd)
+{
+ svs_fd_t *svs_fd = NULL;
+ uint64_t value = 0;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, fd, out);
+
+ ret = __fd_ctx_get (fd, this, &value);
+ if (ret)
+ return NULL;
+
+ svs_fd = (svs_fd_t *) ((long) value);
+
+out:
+ return svs_fd;
+}
+
+svs_fd_t *
+svs_fd_ctx_get (xlator_t *this, fd_t *fd)
+{
+ svs_fd_t *svs_fd = NULL;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, fd, out);
+
+ LOCK (&fd->lock);
+ {
+ svs_fd = __svs_fd_ctx_get (this, fd);
+ }
+ UNLOCK (&fd->lock);
+
+out:
+ return svs_fd;
+}
+
+int32_t
+svs_fd_ctx_set (xlator_t *this, fd_t *fd, svs_fd_t *svs_fd)
+{
+ int32_t ret = -1;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, fd, out);
+ GF_VALIDATE_OR_GOTO (this->name, svs_fd, out);
+
+ LOCK (&fd->lock);
+ {
+ ret = __svs_fd_ctx_set (this, fd, svs_fd);
+ }
+ UNLOCK (&fd->lock);
+
+out:
+ return ret;
+}
+
+svs_fd_t *
+__svs_fd_ctx_get_or_new (xlator_t *this, fd_t *fd)
+{
+ svs_fd_t *svs_fd = NULL;
+ int ret = -1;
+ glfs_t *fs = NULL;
+ glfs_object_t *object = NULL;
+ svs_inode_t *inode_ctx = NULL;
+ glfs_fd_t *glfd = NULL;
+ inode_t *inode = NULL;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, fd, out);
+
+ inode = fd->inode;
+ svs_fd = __svs_fd_ctx_get (this, fd);
+ if (svs_fd) {
+ ret = 0;
+ goto out;
+ }
+
+ svs_fd = svs_fd_new ();
+ if (!svs_fd) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to allocate new fd "
+ "context for gfid %s", uuid_utoa (inode->gfid));
+ goto out;
+ }
+
+ if (fd_is_anonymous (fd)) {
+ inode_ctx = svs_inode_ctx_get (this, inode);
+ if (!inode_ctx) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to get inode "
+ "context for %s", uuid_utoa (inode->gfid));
+ goto out;
+ }
+
+ fs = inode_ctx->fs;
+ object = inode_ctx->object;
+
+ if (inode->ia_type == IA_IFDIR) {
+ glfd = glfs_h_opendir (fs, object);
+ if (!glfd) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "open the directory %s",
+ uuid_utoa (inode->gfid));
+ goto out;
+ }
+ }
+
+ if (inode->ia_type == IA_IFREG) {
+ glfd = glfs_h_open (fs, object, O_RDONLY|O_LARGEFILE);
+ if (!glfd) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "open the file %s",
+ uuid_utoa (inode->gfid));
+ goto out;
+ }
+ }
+
+ svs_fd->fd = glfd;
+ }
+
+ ret = __svs_fd_ctx_set (this, fd, svs_fd);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to set fd context "
+ "for gfid %s", uuid_utoa (inode->gfid));
+ if (svs_fd->fd) {
+ if (inode->ia_type == IA_IFDIR) {
+ ret = glfs_closedir (svs_fd->fd);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to close the fd for %s",
+ uuid_utoa (inode->gfid));
+ }
+ if (inode->ia_type == IA_IFREG) {
+ ret = glfs_close (svs_fd->fd);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to close the fd for %s",
+ uuid_utoa (inode->gfid));
+ }
+ }
+ ret = -1;
+ }
+
+out:
+ if (ret) {
+ GF_FREE (svs_fd);
+ svs_fd = NULL;
+ }
+
+ return svs_fd;
+}
+
+svs_fd_t *
+svs_fd_ctx_get_or_new (xlator_t *this, fd_t *fd)
+{
+ svs_fd_t *svs_fd = NULL;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, fd, out);
+
+ LOCK (&fd->lock);
+ {
+ svs_fd = __svs_fd_ctx_get_or_new (this, fd);
+ }
+ UNLOCK (&fd->lock);
+
+out:
+ return svs_fd;
+}
+
+void
+svs_fill_ino_from_gfid (struct iatt *buf)
+{
+ uint64_t temp_ino = 0;
+ int j = 0;
+ int i = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, buf, out);
+
+ /* consider least significant 8 bytes of value out of gfid */
+ if (uuid_is_null (buf->ia_gfid)) {
+ buf->ia_ino = -1;
+ goto out;
+ }
+ for (i = 15; i > (15 - 8); i--) {
+ temp_ino += (uint64_t)(buf->ia_gfid[i]) << j;
+ j += 8;
+ }
+ buf->ia_ino = temp_ino;
+out:
+ return;
+}
+
+void
+svs_iatt_fill (uuid_t gfid, struct iatt *buf)
+{
+ struct timeval tv = {0, };
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, buf, out);
+
+ buf->ia_type = IA_IFDIR;
+ buf->ia_uid = 0;
+ buf->ia_gid = 0;
+ buf->ia_size = 0;
+ buf->ia_nlink = 2;
+ buf->ia_blocks = 8;
+ buf->ia_size = 4096;
+
+ uuid_copy (buf->ia_gfid, gfid);
+ svs_fill_ino_from_gfid (buf);
+
+ buf->ia_prot = ia_prot_from_st_mode (0755);
+
+ gettimeofday (&tv, 0);
+
+ buf->ia_mtime = buf->ia_atime = buf->ia_ctime = tv.tv_sec;
+ buf->ia_mtime_nsec = buf->ia_atime_nsec = buf->ia_ctime_nsec =
+ (tv.tv_usec * 1000);
+
+out:
+ return;
+}
+
+snap_dirent_t *
+svs_get_snap_dirent (xlator_t *this, const char *name)
+{
+ svs_private_t *private = NULL;
+ int i = 0;
+ snap_dirent_t *dirents = NULL;
+ snap_dirent_t *tmp_dirent = NULL;
+ snap_dirent_t *dirent = NULL;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, this->private, out);
+ GF_VALIDATE_OR_GOTO (this->name, name, out);
+
+ private = this->private;
+
+ LOCK (&private->snaplist_lock);
+ {
+ dirents = private->dirents;
+ if (!dirents) {
+ goto unlock;
+ }
+
+ tmp_dirent = dirents;
+ for (i = 0; i < private->num_snaps; i++) {
+ if (!strcmp (tmp_dirent->name, name)) {
+ dirent = tmp_dirent;
+ break;
+ }
+ tmp_dirent++;
+ }
+ }
+unlock:
+ UNLOCK (&private->snaplist_lock);
+
+out:
+ return dirent;
+}
+
+glfs_t *
+svs_initialise_snapshot_volume (xlator_t *this, const char *name)
+{
+ svs_private_t *priv = NULL;
+ int32_t ret = -1;
+ snap_dirent_t *dirent = NULL;
+ char volname[PATH_MAX] = {0, };
+ glfs_t *fs = NULL;
+ int loglevel = GF_LOG_INFO;
+ char logfile[PATH_MAX] = {0, };
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, this->private, out);
+ GF_VALIDATE_OR_GOTO (this->name, name, out);
+
+ priv = this->private;
+
+ dirent = svs_get_snap_dirent (this, name);
+ if (!dirent) {
+ gf_log (this->name, GF_LOG_ERROR, "snap entry for name %s "
+ "not found", name);
+ goto out;
+ }
+
+ if (dirent->fs) {
+ ret = 0;
+ fs = dirent->fs;
+ goto out;
+ }
+
+ snprintf (volname, sizeof (volname), "/snaps/%s/%s",
+ dirent->name, dirent->snap_volname);
+
+ fs = glfs_new (volname);
+ if (!fs) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "glfs instance for snap volume %s "
+ "failed", dirent->name);
+ goto out;
+ }
+
+ ret = glfs_set_volfile_server (fs, "tcp", "localhost",
+ 24007);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "setting the "
+ "volfile srever for snap volume %s "
+ "failed", dirent->name);
+ goto out;
+ }
+
+ ret = glfs_init (fs);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "initing the "
+ "fs for %s failed", dirent->name);
+ goto out;
+ }
+
+ snprintf (logfile, sizeof (logfile),
+ DEFAULT_SVD_LOG_FILE_DIRECTORY "/%s-%s.log",
+ name, dirent->uuid);
+
+ ret = glfs_set_logging(fs, logfile, loglevel);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to set the "
+ "log file path");
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (ret && fs) {
+ glfs_fini (fs);
+ fs = NULL;
+ }
+
+ if (fs)
+ dirent->fs = fs;
+
+ return fs;
+}
+
+snap_dirent_t *
+svs_get_latest_snap_entry (xlator_t *this)
+{
+ svs_private_t *priv = NULL;
+ snap_dirent_t *dirents = NULL;
+ snap_dirent_t *dirent = NULL;
+
+ GF_VALIDATE_OR_GOTO ("svs", this, out);
+
+ priv = this->private;
+
+ LOCK (&priv->snaplist_lock);
+ {
+ dirents = priv->dirents;
+ if (!dirents) {
+ goto unlock;
+ }
+ if (priv->num_snaps)
+ dirent = &dirents[priv->num_snaps - 1];
+ }
+unlock:
+ UNLOCK (&priv->snaplist_lock);
+
+out:
+ return dirent;
+}
+
+glfs_t *
+svs_get_latest_snapshot (xlator_t *this)
+{
+ glfs_t *fs = NULL;
+ snap_dirent_t *dirent = NULL;
+ svs_private_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO ("svs", this, out);
+ priv = this->private;
+
+ dirent = svs_get_latest_snap_entry (this);
+
+ if (dirent) {
+ LOCK (&priv->snaplist_lock);
+ {
+ fs = dirent->fs;
+ }
+ UNLOCK (&priv->snaplist_lock);
+ }
+
+out:
+ return fs;
+}
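svs_fill_ino_from_gfid() above packs the 8 least-significant bytes of the gfid into the inode number. Here is a standalone sketch of the same packing, runnable outside the xlator; `ino_from_gfid` is an illustrative name, not part of the patch.

```c
#include <inttypes.h>
#include <stdio.h>

/* Pack the last 8 bytes of a 16-byte gfid into a 64-bit pseudo inode
 * number, mirroring the loop in svs_fill_ino_from_gfid(). */
static uint64_t
ino_from_gfid (const unsigned char gfid[16])
{
        uint64_t ino = 0;
        int      shift = 0;
        int      i = 0;

        for (i = 15; i > (15 - 8); i--) {
                ino += (uint64_t)gfid[i] << shift;
                shift += 8;
        }

        return ino;
}

int
main (void)
{
        unsigned char gfid[16] = {0};

        gfid[15] = 0x2a; /* least-significant byte of the gfid */
        printf ("ino = %" PRIu64 "\n", ino_from_gfid (gfid)); /* 42 */

        return 0;
}
```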
diff --git a/xlators/features/snapview-server/src/snapview-server-mgmt.c b/xlators/features/snapview-server/src/snapview-server-mgmt.c
new file mode 100644
index 00000000000..f2a1e7b7893
--- /dev/null
+++ b/xlators/features/snapview-server/src/snapview-server-mgmt.c
@@ -0,0 +1,480 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "snapview-server.h"
+#include "snapview-server-mem-types.h"
+#include <pthread.h>
+
+int
+mgmt_cbk_snap (struct rpc_clnt *rpc, void *mydata, void *data)
+{
+ xlator_t *this = NULL;
+
+ this = mydata;
+ GF_ASSERT (this);
+
+ gf_log ("mgmt", GF_LOG_INFO, "list of snapshots changed");
+
+ svs_get_snapshot_list (this);
+ return 0;
+}
+
+rpcclnt_cb_actor_t svs_cbk_actors[GF_CBK_MAXVALUE] = {
+ [GF_CBK_GET_SNAPS] = {"GETSNAPS", GF_CBK_GET_SNAPS, mgmt_cbk_snap},
+};
+
+struct rpcclnt_cb_program svs_cbk_prog = {
+ .progname = "GlusterFS Callback",
+ .prognum = GLUSTER_CBK_PROGRAM,
+ .progver = GLUSTER_CBK_VERSION,
+ .actors = svs_cbk_actors,
+ .numactors = GF_CBK_MAXVALUE,
+};
+
+char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
+ [GF_HNDSK_NULL] = "NULL",
+ [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
+};
+
+rpc_clnt_prog_t svs_clnt_handshake_prog = {
+ .progname = "GlusterFS Handshake",
+ .prognum = GLUSTER_HNDSK_PROGRAM,
+ .progver = GLUSTER_HNDSK_VERSION,
+ .procnames = clnt_handshake_procs,
+};
+
+int
+svs_mgmt_init (xlator_t *this)
+{
+ int ret = -1;
+ svs_private_t *priv = NULL;
+ dict_t *options = NULL;
+ int port = GF_DEFAULT_BASE_PORT;
+ char *host = NULL;
+ cmd_args_t *cmd_args = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, this->private, out);
+ GF_VALIDATE_OR_GOTO (this->name, this->ctx, out);
+
+ priv = this->private;
+
+ ctx = this->ctx;
+ cmd_args = &ctx->cmd_args;
+
+ host = "localhost";
+ if (cmd_args->volfile_server)
+ host = cmd_args->volfile_server;
+
+ ret = rpc_transport_inet_options_build (&options, host, port);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to build the "
+ "transport options");
+ goto out;
+ }
+
+ priv->rpc = rpc_clnt_new (options, this->ctx, this->name, 8);
+ if (!priv->rpc) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to initialize RPC");
+ goto out;
+ }
+
+ ret = rpcclnt_cbk_program_register (priv->rpc, &svs_cbk_prog,
+ this);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to register callback program");
+ goto out;
+ }
+
+ ret = rpc_clnt_start (priv->rpc);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to start the rpc "
+ "client");
+ goto out;
+ }
+
+ ret = 0;
+
+ gf_log (this->name, GF_LOG_DEBUG, "svs mgmt init successful");
+
+out:
+ /* priv or priv->rpc may be unset if a validation check or
+ * rpc_clnt_new() failed above, so guard the cleanup. */
+ if (ret && priv && priv->rpc) {
+ rpc_clnt_connection_cleanup (&priv->rpc->conn);
+ rpc_clnt_unref (priv->rpc);
+ priv->rpc = NULL;
+ }
+
+ return ret;
+}
+
+int
+svs_mgmt_submit_request (void *req, call_frame_t *frame,
+ glusterfs_ctx_t *ctx,
+ rpc_clnt_prog_t *prog, int procnum,
+ fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
+{
+ int ret = -1;
+ int count = 0;
+ struct iovec iov = {0, };
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ ssize_t xdr_size = 0;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", frame, out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", req, out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", ctx, out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", prog, out);
+
+ GF_ASSERT (frame->this);
+
+ iobref = iobref_new ();
+ if (!iobref) {
+ goto out;
+ }
+
+ if (req) {
+ xdr_size = xdr_sizeof (xdrproc, req);
+
+ iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size);
+ if (!iobuf) {
+ goto out;
+ }
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_pagesize (iobuf);
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic (iov, req, xdrproc);
+ if (ret == -1) {
+ gf_log (frame->this->name, GF_LOG_WARNING,
+ "Failed to create XDR payload");
+ goto out;
+ }
+ iov.iov_len = ret;
+ count = 1;
+ }
+
+ ret = rpc_clnt_submit (ctx->mgmt, prog, procnum, cbkfn,
+ &iov, count,
+ NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
+
+out:
+ if (iobref)
+ iobref_unref (iobref);
+
+ if (iobuf)
+ iobuf_unref (iobuf);
+ return ret;
+}
+
+
+int
+mgmt_get_snapinfo_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf_getsnap_name_uuid_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = -1;
+ dict_t *dict = NULL;
+ char key[1024] = {0};
+ int snapcount = 0;
+ svs_private_t *priv = NULL;
+ xlator_t *this = NULL;
+ int i = 0;
+ int j = 0;
+ char *value = NULL;
+ snap_dirent_t *dirents = NULL;
+ snap_dirent_t *old_dirents = NULL;
+ int oldcount = 0;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", req, error_out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", myframe, error_out);
+ GF_VALIDATE_OR_GOTO ("snapview-server", iov, error_out);
+
+ frame = myframe;
+ this = frame->this;
+ ctx = frame->this->ctx;
+ priv = this->private;
+ old_dirents = priv->dirents;
+
+ if (!ctx) {
+ gf_log (frame->this->name, GF_LOG_ERROR, "NULL context");
+ errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 == req->rpc_status) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "RPC call is not successful");
+ errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gf_getsnap_name_uuid_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response, rsp.op_ret = %d",
+ rsp.op_ret);
+ goto out;
+ }
+
+ if (rsp.op_ret == -1) {
+ errno = rsp.op_errno;
+ ret = -1;
+ goto out;
+ }
+
+ if (!rsp.dict.dict_len) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Response dict is not populated");
+ ret = -1;
+ errno = EINVAL;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to unserialize dictionary");
+ errno = EINVAL;
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "snap-count", (int32_t*)&snapcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snapcount");
+ errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
+
+ if (snapcount > 0) {
+ /* allocate dirents to hold the fetched snap list */
+ dirents = GF_CALLOC (snapcount, sizeof (snap_dirent_t),
+ gf_svs_mt_dirents_t);
+ if (!dirents) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Unable to allocate memory");
+ errno = ENOMEM;
+ ret = -1;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < snapcount; i++) {
+ snprintf (key, sizeof (key), "snap-volname.%d", i+1);
+ ret = dict_get_str (dict, key, &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snap volname %d",
+ i+1);
+ errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
+
+ strncpy (dirents[i].snap_volname, value,
+ sizeof (dirents[i].snap_volname));
+
+ snprintf (key, sizeof (key), "snap-id.%d", i+1);
+ ret = dict_get_str (dict, key, &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snap uuid %d", i+1);
+ errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
+ strncpy (dirents[i].uuid, value,
+ sizeof (dirents[i].uuid));
+
+ snprintf (key, sizeof (key), "snapname.%d", i+1);
+ ret = dict_get_str (dict, key, &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error retrieving snap name %d", i+1);
+ errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
+ strncpy (dirents[i].name, value,
+ sizeof (dirents[i].name));
+ }
+
+ /*
+ * Got the new snap list populated in dirents
+ * The new snap list is either a subset or a superset of
+ * the existing snaplist old_dirents which has priv->num_snaps
+ * number of entries.
+ *
+ * If subset, then clean up the fs for entries which are
+ * no longer relevant.
+ *
+ * For other overlapping entries set the fs for new dirents
+ * entries which have a fs assigned already in old_dirents
+ *
+ * We do this because we don't want to do new glfs_init()s
+ * repeatedly as the dirents entries for snapshot volumes get
+ * repeatedly cleaned up and reallocated; failing to reuse the
+ * existing handles would lead to memory leaks.
+ */
+
+ LOCK (&priv->snaplist_lock);
+ {
+ oldcount = priv->num_snaps;
+ for (i = 0; i < priv->num_snaps; i++) {
+ for (j = 0; j < snapcount; j++) {
+ if ((!strcmp (old_dirents[i].name,
+ dirents[j].name)) &&
+ (!strcmp (old_dirents[i].uuid,
+ dirents[j].uuid))) {
+ dirents[j].fs = old_dirents[i].fs;
+ old_dirents[i].fs = NULL;
+ break;
+ }
+ }
+ }
+
+ priv->dirents = dirents;
+ priv->num_snaps = snapcount;
+ }
+ UNLOCK (&priv->snaplist_lock);
+
+ if (old_dirents) {
+ for (i = 0; i < oldcount; i++) {
+ if (old_dirents[i].fs)
+ glfs_fini (old_dirents[i].fs);
+ }
+ }
+
+ GF_FREE (old_dirents);
+
+ ret = 0;
+
+out:
+ if (dict) {
+ dict_unref (dict);
+ }
+ free (rsp.dict.dict_val);
+ free (rsp.op_errstr);
+
+ if (ret && dirents) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Could not update dirents with refreshed snap list");
+ GF_FREE (dirents);
+ }
+
+ if (myframe)
+ SVS_STACK_DESTROY (myframe);
+
+error_out:
+ return ret;
+}
+
+int
+svs_get_snapshot_list (xlator_t *this)
+{
+ gf_getsnap_name_uuid_req req = {{0,}};
+ int ret = -1;
+ dict_t *dict = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ call_frame_t *frame = NULL;
+ svs_private_t *priv = NULL;
+ gf_boolean_t frame_cleanup = _gf_true;
+
+ GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
+
+ ctx = this->ctx;
+ if (!ctx) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "ctx is NULL");
+ goto out;
+ }
+
+ frame = create_frame (this, ctx->pool);
+ if (!frame) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error allocating frame");
+ goto out;
+ }
+
+ priv = this->private;
+
+ dict = dict_new ();
+ if (!dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error allocating dictionary");
+ goto out;
+ }
+
+ ret = dict_set_str (dict, "volname", priv->volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error setting volname in dict");
+ goto out;
+ }
+
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to serialize dictionary");
+ ret = -1;
+ goto out;
+ }
+
+ ret = svs_mgmt_submit_request (&req, frame, ctx,
+ &svs_clnt_handshake_prog,
+ GF_HNDSK_GET_SNAPSHOT_INFO,
+ mgmt_get_snapinfo_cbk,
+ (xdrproc_t)xdr_gf_getsnap_name_uuid_req);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error sending snapshot names RPC request");
+ }
+
+ frame_cleanup = _gf_false;
+
+out:
+ if (dict) {
+ dict_unref (dict);
+ }
+ GF_FREE (req.dict.dict_val);
+
+ if (frame_cleanup) {
+ /*
+ * Destroy the frame if we encountered an error;
+ * otherwise it is cleaned up in
+ * mgmt_get_snapinfo_cbk
+ */
+ SVS_STACK_DESTROY (frame);
+ }
+
+ return ret;
+}
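The merge step in mgmt_get_snapinfo_cbk() above avoids repeated glfs_init() calls by carrying glfs handles over from the old snap list to the refreshed one when both name and uuid match. Below is a condensed, self-contained sketch of just that carry-over with a simplified dirent type; the names are illustrative.

```c
#include <stddef.h>
#include <string.h>

typedef struct {
        char  name[256];
        char  uuid[64];
        void *fs; /* stands in for the glfs_t handle */
} dirent_sketch_t;

/* Move already-initialised fs handles from the old list to matching
 * entries of the refreshed list, mirroring the nested loop in
 * mgmt_get_snapinfo_cbk(). */
static void
carry_over_fs (dirent_sketch_t *old_list, int oldcount,
               dirent_sketch_t *new_list, int newcount)
{
        int i = 0;
        int j = 0;

        for (i = 0; i < oldcount; i++) {
                for (j = 0; j < newcount; j++) {
                        if (!strcmp (old_list[i].name, new_list[j].name) &&
                            !strcmp (old_list[i].uuid, new_list[j].uuid)) {
                                new_list[j].fs = old_list[i].fs;
                                old_list[i].fs = NULL; /* ownership moved */
                                break;
                        }
                }
        }

        /* Any old entry still holding an fs was dropped from the snap
         * list; the caller must tear it down (glfs_fini() in the real
         * code). */
}
```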
diff --git a/xlators/features/snapview-server/src/snapview-server.c b/xlators/features/snapview-server/src/snapview-server.c
index b90ad24d7d0..21f3dbee563 100644
--- a/xlators/features/snapview-server/src/snapview-server.c
+++ b/xlators/features/snapview-server/src/snapview-server.c
@@ -22,1026 +22,6 @@
#include "syscall.h"
#include <pthread.h>
-static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t condvar = PTHREAD_COND_INITIALIZER;
-static gf_boolean_t snap_worker_resume;
-
-void
-snaplist_refresh (void *data)
-{
- xlator_t *this = NULL;
- int ret = 0;
- svs_private_t *priv = NULL;
-
- this = data;
- priv = this->private;
-
- ret = svs_get_snapshot_list (this);
- if (ret) {
- gf_log ("snapview-server", GF_LOG_WARNING,
- "Error retrieving refreshed snapshot list");
- }
-
- return;
-}
-
-void *
-snaplist_worker (void *data)
-{
- xlator_t *this = NULL;
- int ret = 0;
- struct timespec timeout = {0, };
- svs_private_t *priv = NULL;
- glusterfs_ctx_t *ctx = NULL;
-
- this = data;
- priv = this->private;
- ctx = this->ctx;
- GF_ASSERT (ctx);
-
- ret = pthread_mutex_lock (&priv->snaplist_lock);
- if (ret != 0) {
- goto out;
- }
-
- priv->is_snaplist_done = 1;
-
- ret = pthread_mutex_unlock (&priv->snaplist_lock);
- if (ret != 0) {
- goto out;
- }
-
- while (1) {
- timeout.tv_sec = 300;
- timeout.tv_nsec = 0;
- priv->snap_timer = gf_timer_call_after (ctx, timeout,
- snaplist_refresh,
- data);
- ret = pthread_mutex_lock (&mutex);
- if (ret != 0) {
- goto out;
- }
- /*
- * We typically expect this mutex lock to succeed
- * A corner case might be when snaplist_worker is
- * scheduled and it tries to acquire this lock
- * but we are in the middle of xlator _fini()
- * when the mutex is itself being destroyed.
- * To prevent any undefined behavior or segfault
- * at that point, we check the ret here.
- * If mutex is destroyed we expect a EINVAL for a
- * mutex which is not initialized properly.
- * Bail then.
- * Same for the unlock case.
- */
- while (!snap_worker_resume) {
- pthread_cond_wait (&condvar, &mutex);
- }
-
- snap_worker_resume = _gf_false;
-
- ret = pthread_mutex_unlock (&mutex);
- if (ret != 0) {
- goto out;
- }
- }
-
-out:
- return NULL;
-}
-
-int
-svs_mgmt_submit_request (void *req, call_frame_t *frame,
- glusterfs_ctx_t *ctx,
- rpc_clnt_prog_t *prog, int procnum,
- fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
-{
- int ret = -1;
- int count = 0;
- struct iovec iov = {0, };
- struct iobuf *iobuf = NULL;
- struct iobref *iobref = NULL;
- ssize_t xdr_size = 0;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", frame, out);
- GF_VALIDATE_OR_GOTO ("snapview-server", req, out);
- GF_VALIDATE_OR_GOTO ("snapview-server", ctx, out);
- GF_VALIDATE_OR_GOTO ("snapview-server", prog, out);
-
- GF_ASSERT (frame->this);
-
- iobref = iobref_new ();
- if (!iobref) {
- goto out;
- }
-
- if (req) {
- xdr_size = xdr_sizeof (xdrproc, req);
-
- iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size);
- if (!iobuf) {
- goto out;
- }
-
- iobref_add (iobref, iobuf);
-
- iov.iov_base = iobuf->ptr;
- iov.iov_len = iobuf_pagesize (iobuf);
-
- /* Create the xdr payload */
- ret = xdr_serialize_generic (iov, req, xdrproc);
- if (ret == -1) {
- gf_log (frame->this->name, GF_LOG_WARNING,
- "Failed to create XDR payload");
- goto out;
- }
- iov.iov_len = ret;
- count = 1;
- }
-
- ret = rpc_clnt_submit (ctx->mgmt, prog, procnum, cbkfn,
- &iov, count,
- NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
-
-out:
- if (iobref)
- iobref_unref (iobref);
-
- if (iobuf)
- iobuf_unref (iobuf);
- return ret;
-}
-
-
-int mgmt_get_snapinfo_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
-{
- gf_getsnap_name_uuid_rsp rsp = {0,};
- call_frame_t *frame = NULL;
- glusterfs_ctx_t *ctx = NULL;
- int ret = 0;
- dict_t *dict = NULL;
- char key[1024] = {0};
- int snapcount = 0;
- svs_private_t *priv = NULL;
- xlator_t *this = NULL;
- int i = 0;
- int j = 0;
- char *value = NULL;
- snap_dirent_t *dirents = NULL;
- snap_dirent_t *old_dirents = NULL;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", req, error_out);
- GF_VALIDATE_OR_GOTO ("snapview-server", myframe, error_out);
- GF_VALIDATE_OR_GOTO ("snapview-server", iov, error_out);
-
- frame = myframe;
- this = frame->this;
- ctx = frame->this->ctx;
- priv = this->private;
- old_dirents = priv->dirents;
-
- if (!ctx) {
- gf_log (frame->this->name, GF_LOG_ERROR, "NULL context");
- errno = EINVAL;
- ret = -1;
- goto out;
- }
-
- if (-1 == req->rpc_status) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "RPC call is not successful");
- errno = EINVAL;
- ret = -1;
- goto out;
- }
-
- ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gf_getsnap_name_uuid_rsp);
- if (ret < 0) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response, rsp.op_ret = %d",
- rsp.op_ret);
- goto out;
- }
-
- if (rsp.op_ret == -1) {
- errno = rsp.op_errno;
- ret = -1;
- goto out;
- }
-
- if (!rsp.dict.dict_len) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "Response dict is not populated");
- ret = -1;
- errno = EINVAL;
- goto out;
- }
-
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- errno = ENOMEM;
- goto out;
- }
-
- ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict);
- if (ret) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "Failed to unserialize dictionary");
- errno = EINVAL;
- goto out;
- }
-
- ret = dict_get_int32 (dict, "snap-count", (int32_t*)&snapcount);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Error retrieving snapcount");
- errno = EINVAL;
- ret = -1;
- goto out;
- }
-
- pthread_mutex_lock (&priv->snaplist_lock);
-
- if ((priv->num_snaps == 0) &&
- (snapcount != 0)) {
- /* first time we are fetching snap list */
- dirents = GF_CALLOC (snapcount, sizeof (snap_dirent_t),
- gf_svs_mt_dirents_t);
- if (!dirents) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "Unable to allocate memory");
- errno = ENOMEM;
- ret = -1;
- goto unlock;
- }
- } else {
- /* fetch snaplist dynamically at run-time */
- dirents = GF_CALLOC (snapcount, sizeof (snap_dirent_t),
- gf_svs_mt_dirents_t);
- if (!dirents) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "Unable to allocate memory");
- errno = ENOMEM;
- ret = -1;
- goto unlock;
- }
- }
-
- for (i = 0; i < snapcount; i++) {
- snprintf (key, sizeof (key), "snap-volname.%d", i+1);
- ret = dict_get_str (dict, key, &value);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Error retrieving snap volname %d", i+1);
- errno = EINVAL;
- ret = -1;
- goto unlock;
- }
- strncpy (dirents[i].snap_volname, value,
- sizeof (dirents[i].snap_volname));
-
- snprintf (key, sizeof (key), "snap-id.%d", i+1);
- ret = dict_get_str (dict, key, &value);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Error retrieving snap uuid %d", i+1);
- errno = EINVAL;
- ret = -1;
- goto unlock;
- }
- strncpy (dirents[i].uuid, value, sizeof (dirents[i].uuid));
-
- snprintf (key, sizeof (key), "snapname.%d", i+1);
- ret = dict_get_str (dict, key, &value);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Error retrieving snap name %d", i+1);
- errno = EINVAL;
- ret = -1;
- goto unlock;
- }
- strncpy (dirents[i].name, value, sizeof (dirents[i].name));
- }
-
- /*
- * Got the new snap list populated in dirents
- * The new snap list is either a subset or a superset of
- * the existing snaplist old_dirents which has priv->num_snaps
- * number of entries.
- *
- * If subset, then clean up the fs for entries which are
- * no longer relevant.
- *
- * For other overlapping entries set the fs for new dirents
- * entries which have a fs assigned already in old_dirents
- *
- * We do this as we don't want to do new glfs_init()s repeatedly
- * as the dirents entries for snapshot volumes get repatedly
- * cleaned up and allocated. And if we don't then that will lead
- * to memleaks
- */
- for (i = 0; i < priv->num_snaps; i++) {
- for (j = 0; j < snapcount; j++) {
- if ((!strcmp (old_dirents[i].name,
- dirents[j].name)) &&
- (!strcmp (old_dirents[i].uuid,
- dirents[j].uuid))) {
- dirents[j].fs = old_dirents[i].fs;
- old_dirents[i].fs = NULL;
- break;
- }
- }
- }
-
- if (old_dirents) {
- for (i=0; i < priv->num_snaps; i++) {
- if (old_dirents[i].fs)
- glfs_fini (old_dirents[i].fs);
- }
- }
-
- priv->dirents = dirents;
- priv->num_snaps = snapcount;
-
- GF_FREE (old_dirents);
-
- ret = 0;
-
-unlock:
- /*
- *
- * We will unlock the snaplist_lock here for two reasons:
- * 1. We ideally would like to avoid nested locks
- * 2. The snaplist_lock and the mutex protecting the condvar
- * are independent of each other and don't need to be
- * mixed together
- */
- pthread_mutex_unlock (&priv->snaplist_lock);
-
-out:
- pthread_mutex_lock (&mutex);
- snap_worker_resume = _gf_true;
- if (priv->is_snaplist_done) {
- /*
- * No need to signal if it is the first time
- * refresh of the snaplist as no thread is
- * waiting on this. It is only when the snaplist_worker
- * is started that we have a thread waiting on this
- */
- pthread_cond_signal (&condvar);
- }
- pthread_mutex_unlock (&mutex);
-
- if (dict) {
- dict_unref (dict);
- }
- free (rsp.dict.dict_val);
- free (rsp.op_errstr);
-
- if (ret && dirents) {
- gf_log (this->name, GF_LOG_WARNING,
- "Could not update dirents with refreshed snap list");
- GF_FREE (dirents);
- }
-
- if (myframe)
- SVS_STACK_DESTROY (myframe);
-
-error_out:
- return ret;
-}
-
-int
-svs_get_snapshot_list (xlator_t *this)
-{
- gf_getsnap_name_uuid_req req = {{0,}};
- int ret = 0;
- dict_t *dict = NULL;
- glusterfs_ctx_t *ctx = NULL;
- call_frame_t *frame = NULL;
- svs_private_t *priv = NULL;
- gf_boolean_t frame_cleanup = _gf_false;
-
- ctx = this->ctx;
- if (!ctx) {
- gf_log (this->name, GF_LOG_ERROR,
- "ctx is NULL");
- ret = -1;
- goto out;
- }
-
- frame = create_frame (this, ctx->pool);
- if (!frame) {
- gf_log (this->name, GF_LOG_ERROR,
- "Error allocating frame");
- ret = -1;
- goto out;
- }
-
- priv = this->private;
-
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- gf_log (this->name, GF_LOG_ERROR,
- "Error allocating dictionary");
- frame_cleanup = _gf_true;
- goto out;
- }
-
- ret = dict_set_str (dict, "volname", priv->volname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Error setting volname in dict");
- frame_cleanup = _gf_true;
- goto out;
- }
-
- ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
- &req.dict.dict_len);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Failed to serialize dictionary");
- ret = -1;
- frame_cleanup = _gf_true;
- goto out;
- }
-
- ret = svs_mgmt_submit_request (&req, frame, ctx,
- &svs_clnt_handshake_prog,
- GF_HNDSK_GET_SNAPSHOT_INFO,
- mgmt_get_snapinfo_cbk,
- (xdrproc_t)xdr_gf_getsnap_name_uuid_req);
-
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Error sending snapshot names RPC request");
- }
-
-out:
- if (dict) {
- dict_unref (dict);
- }
- GF_FREE (req.dict.dict_val);
-
- if (frame_cleanup) {
- /*
- * Destroy the frame if we encountered an error
- * Else we need to clean it up in
- * mgmt_get_snapinfo_cbk
- */
- SVS_STACK_DESTROY (frame);
- }
-
- return ret;
-}
-
-int
-__svs_inode_ctx_set (xlator_t *this, inode_t *inode, svs_inode_t *svs_inode)
-{
- uint64_t value = 0;
- int ret = -1;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
- GF_VALIDATE_OR_GOTO (this->name, svs_inode, out);
-
- value = (uint64_t)(long) svs_inode;
-
- ret = __inode_ctx_set (inode, this, &value);
-
-out:
- return ret;
-}
-
-
-svs_inode_t *
-__svs_inode_ctx_get (xlator_t *this, inode_t *inode)
-{
- svs_inode_t *svs_inode = NULL;
- uint64_t value = 0;
- int ret = -1;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
-
- ret = __inode_ctx_get (inode, this, &value);
- if (ret)
- goto out;
-
- svs_inode = (svs_inode_t *) ((long) value);
-
-out:
- return svs_inode;
-}
-
-
-svs_inode_t *
-svs_inode_ctx_get (xlator_t *this, inode_t *inode)
-{
- svs_inode_t *svs_inode = NULL;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
-
- LOCK (&inode->lock);
- {
- svs_inode = __svs_inode_ctx_get (this, inode);
- }
- UNLOCK (&inode->lock);
-
-out:
- return svs_inode;
-}
-
-int32_t
-svs_inode_ctx_set (xlator_t *this, inode_t *inode, svs_inode_t *svs_inode)
-{
- int32_t ret = -1;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
- GF_VALIDATE_OR_GOTO (this->name, svs_inode, out);
-
- LOCK (&inode->lock);
- {
- ret = __svs_inode_ctx_set (this, inode, svs_inode);
- }
- UNLOCK (&inode->lock);
-
-out:
- return ret;
-}
-
-svs_inode_t *
-svs_inode_new ()
-{
- svs_inode_t *svs_inode = NULL;
-
- svs_inode = GF_CALLOC (1, sizeof (*svs_inode), gf_svs_mt_svs_inode_t);
-
- return svs_inode;
-}
-
-svs_inode_t *
-svs_inode_ctx_get_or_new (xlator_t *this, inode_t *inode)
-{
- svs_inode_t *svs_inode = NULL;
- int ret = -1;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
-
- LOCK (&inode->lock);
- {
- svs_inode = __svs_inode_ctx_get (this, inode);
- if (!svs_inode) {
- svs_inode = svs_inode_new ();
- if (svs_inode) {
- ret = __svs_inode_ctx_set (this, inode,
- svs_inode);
- if (ret) {
- GF_FREE (svs_inode);
- svs_inode = NULL;
- }
- }
- }
- }
- UNLOCK (&inode->lock);
-
-out:
- return svs_inode;
-}
-
-svs_fd_t *
-svs_fd_new ()
-{
- svs_fd_t *svs_fd = NULL;
-
- svs_fd = GF_CALLOC (1, sizeof (*svs_fd), gf_svs_mt_svs_fd_t);
-
- return svs_fd;
-}
-
-int
-__svs_fd_ctx_set (xlator_t *this, fd_t *fd, svs_fd_t *svs_fd)
-{
- uint64_t value = 0;
- int ret = -1;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, fd, out);
- GF_VALIDATE_OR_GOTO (this->name, svs_fd, out);
-
- value = (uint64_t)(long) svs_fd;
-
- ret = __fd_ctx_set (fd, this, value);
-
-out:
- return ret;
-}
-
-
-svs_fd_t *
-__svs_fd_ctx_get (xlator_t *this, fd_t *fd)
-{
- svs_fd_t *svs_fd = NULL;
- uint64_t value = 0;
- int ret = -1;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, fd, out);
-
- ret = __fd_ctx_get (fd, this, &value);
- if (ret)
- return NULL;
-
- svs_fd = (svs_fd_t *) ((long) value);
-
-out:
- return svs_fd;
-}
-
-
-svs_fd_t *
-svs_fd_ctx_get (xlator_t *this, fd_t *fd)
-{
- svs_fd_t *svs_fd = NULL;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, fd, out);
-
- LOCK (&fd->lock);
- {
- svs_fd = __svs_fd_ctx_get (this, fd);
- }
- UNLOCK (&fd->lock);
-
-out:
- return svs_fd;
-}
-
-int32_t
-svs_fd_ctx_set (xlator_t *this, fd_t *fd, svs_fd_t *svs_fd)
-{
- int32_t ret = -1;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, fd, out);
- GF_VALIDATE_OR_GOTO (this->name, svs_fd, out);
-
- LOCK (&fd->lock);
- {
- ret = __svs_fd_ctx_set (this, fd, svs_fd);
- }
- UNLOCK (&fd->lock);
-
-out:
- return ret;
-}
-
-svs_fd_t *
-__svs_fd_ctx_get_or_new (xlator_t *this, fd_t *fd)
-{
- svs_fd_t *svs_fd = NULL;
- int ret = -1;
- glfs_t *fs = NULL;
- glfs_object_t *object = NULL;
- svs_inode_t *inode_ctx = NULL;
- glfs_fd_t *glfd = NULL;
- inode_t *inode = NULL;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, fd, out);
-
- inode = fd->inode;
- svs_fd = __svs_fd_ctx_get (this, fd);
- if (svs_fd) {
- ret = 0;
- goto out;
- }
-
- svs_fd = svs_fd_new ();
- if (!svs_fd) {
- gf_log (this->name, GF_LOG_ERROR, "failed to allocate new fd "
- "context for gfid %s", uuid_utoa (inode->gfid));
- goto out;
- }
-
- if (fd_is_anonymous (fd)) {
- inode_ctx = svs_inode_ctx_get (this, inode);
- if (!inode_ctx) {
- gf_log (this->name, GF_LOG_ERROR, "failed to get inode "
- "context for %s", uuid_utoa (inode->gfid));
- goto out;
- }
-
- fs = inode_ctx->fs;
- object = inode_ctx->object;
-
- if (inode->ia_type == IA_IFDIR) {
- glfd = glfs_h_opendir (fs, object);
- if (!glfd) {
- gf_log (this->name, GF_LOG_ERROR, "failed to "
- "open the directory %s",
- uuid_utoa (inode->gfid));
- goto out;
- }
- }
-
- if (inode->ia_type == IA_IFREG) {
- glfd = glfs_h_open (fs, object, O_RDONLY|O_LARGEFILE);
- if (!glfd) {
- gf_log (this->name, GF_LOG_ERROR, "failed to "
- "open the file %s",
- uuid_utoa (inode->gfid));
- goto out;
- }
- }
-
- svs_fd->fd = glfd;
- }
-
- ret = __svs_fd_ctx_set (this, fd, svs_fd);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "failed to set fd context "
- "for gfid %s", uuid_utoa (inode->gfid));
- if (svs_fd->fd) {
- if (inode->ia_type == IA_IFDIR) {
- ret = glfs_closedir (svs_fd->fd);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "failed to close the fd for %s",
- uuid_utoa (inode->gfid));
- }
- if (inode->ia_type == IA_IFREG) {
- ret = glfs_close (svs_fd->fd);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "failed to close the fd for %s",
- uuid_utoa (inode->gfid));
- }
- }
- ret = -1;
- }
-
-out:
- if (ret) {
- GF_FREE (svs_fd);
- svs_fd = NULL;
- }
-
- return svs_fd;
-}
-
-svs_fd_t *
-svs_fd_ctx_get_or_new (xlator_t *this, fd_t *fd)
-{
- svs_fd_t *svs_fd = NULL;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, fd, out);
-
- LOCK (&fd->lock);
- {
- svs_fd = __svs_fd_ctx_get_or_new (this, fd);
- }
- UNLOCK (&fd->lock);
-
-out:
- return svs_fd;
-}
-
-void
-svs_fill_ino_from_gfid (struct iatt *buf)
-{
- uint64_t temp_ino = 0;
- int j = 0;
- int i = 0;
- xlator_t *this = NULL;
-
- this = THIS;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, buf, out);
-
- /* consider least significant 8 bytes of value out of gfid */
- if (uuid_is_null (buf->ia_gfid)) {
- buf->ia_ino = -1;
- goto out;
- }
- for (i = 15; i > (15 - 8); i--) {
- temp_ino += (uint64_t)(buf->ia_gfid[i]) << j;
- j += 8;
- }
- buf->ia_ino = temp_ino;
-out:
- return;
-}
-
-void
-svs_iatt_fill (uuid_t gfid, struct iatt *buf)
-{
- struct timeval tv = {0, };
- xlator_t *this = NULL;
-
- this = THIS;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, buf, out);
-
- buf->ia_type = IA_IFDIR;
- buf->ia_uid = 0;
- buf->ia_gid = 0;
- buf->ia_size = 0;
- buf->ia_nlink = 2;
- buf->ia_blocks = 8;
- buf->ia_size = 4096;
-
- uuid_copy (buf->ia_gfid, gfid);
- svs_fill_ino_from_gfid (buf);
-
- buf->ia_prot = ia_prot_from_st_mode (0755);
-
- gettimeofday (&tv, 0);
-
- buf->ia_mtime = buf->ia_atime = buf->ia_ctime = tv.tv_sec;
- buf->ia_mtime_nsec = buf->ia_atime_nsec = buf->ia_ctime_nsec =
- (tv.tv_usec * 1000);
-
-out:
- return;
-}
-
-snap_dirent_t *
-svs_get_snap_dirent (xlator_t *this, const char *name)
-{
- svs_private_t *private = NULL;
- int i = 0;
- snap_dirent_t *dirents = NULL;
- snap_dirent_t *tmp_dirent = NULL;
- snap_dirent_t *dirent = NULL;
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, this->private, out);
- GF_VALIDATE_OR_GOTO (this->name, name, out);
-
- private = this->private;
-
- pthread_mutex_lock (&private->snaplist_lock);
-
- dirents = private->dirents;
- if (!dirents) {
- pthread_mutex_unlock (&private->snaplist_lock);
- goto out;
- }
-
- tmp_dirent = dirents;
- for (i = 0; i < private->num_snaps; i++) {
- if (!strcmp (tmp_dirent->name, name)) {
- dirent = tmp_dirent;
- break;
- }
- tmp_dirent++;
- }
-
- pthread_mutex_unlock (&private->snaplist_lock);
-
-out:
- return dirent;
-}
-
-glfs_t *
-svs_initialise_snapshot_volume (xlator_t *this, const char *name)
-{
- svs_private_t *priv = NULL;
- int32_t ret = -1;
- snap_dirent_t *dirent = NULL;
- char volname[PATH_MAX] = {0, };
- glfs_t *fs = NULL;
- int loglevel = GF_LOG_INFO;
- char logfile[PATH_MAX] = {0, };
-
- GF_VALIDATE_OR_GOTO ("snapview-server", this, out);
- GF_VALIDATE_OR_GOTO (this->name, this->private, out);
- GF_VALIDATE_OR_GOTO (this->name, name, out);
-
- priv = this->private;
-
- dirent = svs_get_snap_dirent (this, name);
- if (!dirent) {
- gf_log (this->name, GF_LOG_ERROR, "snap entry for name %s "
- "not found", name);
- goto out;
- }
-
- if (dirent->fs) {
- ret = 0;
- fs = dirent->fs;
- goto out;
- }
-
- snprintf (volname, sizeof (volname), "/snaps/%s/%s",
- dirent->name, dirent->snap_volname);
-
- fs = glfs_new (volname);
- if (!fs) {
- gf_log (this->name, GF_LOG_ERROR,
- "glfs instance for snap volume %s "
- "failed", dirent->name);
- goto out;
- }
-
- ret = glfs_set_volfile_server (fs, "tcp", "localhost",
- 24007);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "setting the "
- "volfile srever for snap volume %s "
- "failed", dirent->name);
- goto out;
- }
-
- ret = glfs_init (fs);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "initing the "
- "fs for %s failed", dirent->name);
- goto out;
- }
-
- snprintf (logfile, sizeof (logfile),
- DEFAULT_SVD_LOG_FILE_DIRECTORY "/%s-%s.log",
- name, dirent->uuid);
-
- ret = glfs_set_logging(fs, logfile, loglevel);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "failed to set the "
- "log file path");
- goto out;
- }
-
- ret = 0;
-
-out:
- if (ret && fs) {
- glfs_fini (fs);
- fs = NULL;
- }
-
- if (fs)
- dirent->fs = fs;
-
- return fs;
-}
-
-snap_dirent_t *
-svs_get_latest_snap_entry (xlator_t *this)
-{
- svs_private_t *priv = NULL;
- snap_dirent_t *dirents = NULL;
- snap_dirent_t *dirent = NULL;
-
- GF_VALIDATE_OR_GOTO ("svs", this, out);
-
- priv = this->private;
-
- pthread_mutex_lock (&priv->snaplist_lock);
- dirents = priv->dirents;
- if (!dirents) {
- pthread_mutex_unlock (&priv->snaplist_lock);
- goto out;
- }
- if (priv->num_snaps)
- dirent = &dirents[priv->num_snaps - 1];
-
- pthread_mutex_unlock (&priv->snaplist_lock);
-out:
- return dirent;
-}
-
-glfs_t *
-svs_get_latest_snapshot (xlator_t *this)
-{
- glfs_t *fs = NULL;
- snap_dirent_t *dirent = NULL;
- svs_private_t *priv = NULL;
-
- GF_VALIDATE_OR_GOTO ("svs", this, out);
- priv = this->private;
-
- dirent = svs_get_latest_snap_entry (this);
-
- if (dirent) {
- pthread_mutex_lock (&priv->snaplist_lock);
- fs = dirent->fs;
- pthread_mutex_unlock (&priv->snaplist_lock);
- }
-
-out:
- return fs;
-}
int32_t
svs_lookup_entry_point (xlator_t *this, loc_t *loc, inode_t *parent,
@@ -2096,33 +1076,43 @@ svs_fill_readdir (xlator_t *this, gf_dirent_t *entries, size_t size, off_t off)
GF_ASSERT (priv);
/* create the dir entries */
- pthread_mutex_lock (&priv->snaplist_lock);
- dirents = priv->dirents;
+ LOCK (&priv->snaplist_lock);
+ {
+ dirents = priv->dirents;
- for (i = off; i < priv->num_snaps; ) {
- this_size = sizeof (gf_dirent_t) +
- strlen (dirents[i].name) + 1;
- if (this_size + filled_size > size )
- goto unlock;
+ for (i = off; i < priv->num_snaps; ) {
+ this_size = sizeof (gf_dirent_t) +
+ strlen (dirents[i].name) + 1;
+ if (this_size + filled_size > size )
+ goto unlock;
- entry = gf_dirent_for_name (dirents[i].name);
- if (!entry) {
- gf_log (this->name, GF_LOG_ERROR, "failed to allocate "
- "dentry for %s", dirents[i].name);
- goto unlock;
- }
+ entry = gf_dirent_for_name (dirents[i].name);
+ if (!entry) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to allocate dentry for %s",
+ dirents[i].name);
+ goto unlock;
+ }
- entry->d_off = i + 1;
- entry->d_ino = i + 2*42;
- entry->d_type = DT_DIR;
- list_add_tail (&entry->list, &entries->list);
- ++i;
- count++;
- filled_size += this_size;
+ entry->d_off = i + 1;
+ /*
+ * readdir on the entry-point directory to the snapshot
+ * world, will return elements in the list of the
+ * snapshots as the directory entries. Since the entries
+ * returned are virtual entries which does not exist
+ * physically on the disk, pseudo inode numbers are
+ * generated.
+ */
+ entry->d_ino = i + 2*42;
+ entry->d_type = DT_DIR;
+ list_add_tail (&entry->list, &entries->list);
+ ++i;
+ count++;
+ filled_size += this_size;
+ }
}
-
unlock:
- pthread_mutex_unlock (&priv->snaplist_lock);
+ UNLOCK (&priv->snaplist_lock);
out:
return count;
@@ -2843,6 +1833,10 @@ svs_access (call_frame_t *frame, xlator_t *this, loc_t *loc, int mask,
is_fuse_call = __is_fuse_call (frame);
+ /*
+ * For the entry-point directory, set the read and execute bits,
+ * but not write permission.
+ */
if (inode_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
if (is_fuse_call) {
op_ret = 0;
@@ -2859,6 +1853,10 @@ svs_access (call_frame_t *frame, xlator_t *this, loc_t *loc, int mask,
fs = inode_ctx->fs;
object = inode_ctx->object;
+ /* The actual posix_acl xlator does acl checks differently for
+ fuse and nfs. So set frame->root->pid as the fspid of the
+ syncop if the call came from nfs.
+ */
if (!is_fuse_call)
syncopctx_setfspid (&frame->root->pid);
@@ -2871,12 +1869,6 @@ svs_access (call_frame_t *frame, xlator_t *this, loc_t *loc, int mask,
goto out;
}
- /* The actual posix_acl xlator does acl checks differently for
- fuse and nfs. In this case how to send the information of
- whether the call came from fuse or nfs to the snapshot volume
- via gfapi?
- */
-
op_ret = 0;
op_errno = ret;
@@ -2926,35 +1918,31 @@ init (xlator_t *this)
this->private = priv;
GF_OPTION_INIT ("volname", priv->volname, str, out);
- pthread_mutex_init (&(priv->snaplist_lock), NULL);
-
- pthread_mutex_lock (&priv->snaplist_lock);
- priv->is_snaplist_done = 0;
- priv->num_snaps = 0;
- snap_worker_resume = _gf_false;
- pthread_mutex_unlock (&priv->snaplist_lock);
+ LOCK_INIT (&priv->snaplist_lock);
- /* get the list of snaps first to return to client xlator */
- ret = svs_get_snapshot_list (this);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Error initializing snaplist infrastructure");
- ret = -1;
- goto out;
+ LOCK (&priv->snaplist_lock);
+ {
+ priv->num_snaps = 0;
}
+ UNLOCK (&priv->snaplist_lock);
- if ((ret = pthread_attr_init (&priv->thr_attr)) != 0) {
- gf_log (this->name, GF_LOG_ERROR, "pthread attr init failed");
+ /* What to do here upon failure? Should init fail or succeed? */
+ /* If it succeeds, then dynamic management of snapshots will */
+ /* not happen. */
+ ret = svs_mgmt_init (this);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "failed to initiate the "
+ "mgmt rpc callback for svs. Dymamic management of the"
+ "snapshots will not happen");
goto out;
}
- ret = gf_thread_create (&snap_thread,
- &priv->thr_attr,
- snaplist_worker,
- this);
+ /* get the list of snaps first to return to client xlator */
+ ret = svs_get_snapshot_list (this);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
- "Failed to create snaplist worker thread");
+ "Error initializing snaplist infrastructure");
+ ret = -1;
goto out;
}
@@ -2962,6 +1950,7 @@ init (xlator_t *this)
out:
if (ret && priv) {
+ LOCK_DESTROY (&priv->snaplist_lock);
GF_FREE (priv->dirents);
GF_FREE (priv);
}
@@ -2985,33 +1974,23 @@ fini (xlator_t *this)
"Invalid ctx found");
if (priv) {
- gf_timer_call_cancel (ctx, priv->snap_timer);
- priv->snap_timer = NULL;
- ret = pthread_mutex_destroy (&priv->snaplist_lock);
+ ret = LOCK_DESTROY (&priv->snaplist_lock);
if (ret != 0) {
gf_log (this->name, GF_LOG_WARNING,
"Could not destroy mutex snaplist_lock");
}
- ret = pthread_attr_destroy (&priv->thr_attr);
- if (ret != 0) {
- gf_log (this->name, GF_LOG_WARNING,
- "Could not destroy pthread attr");
- }
+
if (priv->dirents) {
GF_FREE (priv->dirents);
}
- GF_FREE (priv);
- }
- ret = pthread_mutex_destroy (&mutex);
- if (ret != 0) {
- gf_log (this->name, GF_LOG_WARNING,
- "Could not destroy mutex");
- }
- pthread_cond_destroy (&condvar);
- if (ret != 0) {
- gf_log (this->name, GF_LOG_WARNING,
- "Could not destroy condition variable");
+ if (priv->rpc) {
+ /* cleanup the saved-frames before last unref */
+ rpc_clnt_connection_cleanup (&priv->rpc->conn);
+ rpc_clnt_unref (priv->rpc);
+ }
+
+ GF_FREE (priv);
}
return;
diff --git a/xlators/features/snapview-server/src/snapview-server.h b/xlators/features/snapview-server/src/snapview-server.h
index 510599c3650..e689e4981a0 100644
--- a/xlators/features/snapview-server/src/snapview-server.h
+++ b/xlators/features/snapview-server/src/snapview-server.h
@@ -38,6 +38,8 @@
#include "timer.h"
#include "rpc-clnt.h"
#include "protocol-common.h"
+#include "xdr-generic.h"
+
#define DEFAULT_SVD_LOG_FILE_DIRECTORY DATADIR "/log/glusterfs"
@@ -51,6 +53,7 @@
STACK_DESTROY (((call_frame_t *)_frame)->root); \
} while (0)
+
int
svs_mgmt_submit_request (void *req, call_frame_t *frame,
glusterfs_ctx_t *ctx,
@@ -64,22 +67,6 @@ int
mgmt_get_snapinfo_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe);
-char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
- [GF_HNDSK_NULL] = "NULL",
- [GF_HNDSK_SETVOLUME] = "SETVOLUME",
- [GF_HNDSK_GETSPEC] = "GETSPEC",
- [GF_HNDSK_PING] = "PING",
- [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
-};
-
-rpc_clnt_prog_t svs_clnt_handshake_prog = {
- .progname = "GlusterFS Handshake",
- .prognum = GLUSTER_HNDSK_PROGRAM,
- .progver = GLUSTER_HNDSK_VERSION,
- .procnames = clnt_handshake_procs,
-};
-
-
typedef enum {
SNAP_VIEW_ENTRY_POINT_INODE = 0,
SNAP_VIEW_VIRTUAL_INODE
@@ -116,17 +103,69 @@ struct svs_private {
int num_snaps;
char *volname;
struct list_head snaplist;
- pthread_mutex_t snaplist_lock;
- uint32_t is_snaplist_done;
- gf_timer_t *snap_timer;
- pthread_attr_t thr_attr;
+ gf_lock_t snaplist_lock;
+ struct rpc_clnt *rpc;
};
typedef struct svs_private svs_private_t;
+int
+__svs_inode_ctx_set (xlator_t *this, inode_t *inode, svs_inode_t *svs_inode);
+
+svs_inode_t *
+__svs_inode_ctx_get (xlator_t *this, inode_t *inode);
+
+svs_inode_t *
+svs_inode_ctx_get (xlator_t *this, inode_t *inode);
+
+int32_t
+svs_inode_ctx_set (xlator_t *this, inode_t *inode, svs_inode_t *svs_inode);
+
+svs_inode_t *
+svs_inode_new ();
+
+svs_inode_t *
+svs_inode_ctx_get_or_new (xlator_t *this, inode_t *inode);
+
+svs_fd_t *
+svs_fd_new ();
+
+int
+__svs_fd_ctx_set (xlator_t *this, fd_t *fd, svs_fd_t *svs_fd);
+
+svs_fd_t *
+__svs_fd_ctx_get (xlator_t *this, fd_t *fd);
+
+svs_fd_t *
+svs_fd_ctx_get (xlator_t *this, fd_t *fd);
+
+int32_t
+svs_fd_ctx_set (xlator_t *this, fd_t *fd, svs_fd_t *svs_fd);
+
+svs_fd_t *
+__svs_fd_ctx_get_or_new (xlator_t *this, fd_t *fd);
+
+svs_fd_t *
+svs_fd_ctx_get_or_new (xlator_t *this, fd_t *fd);
+
+void
+svs_fill_ino_from_gfid (struct iatt *buf);
+
+void
+svs_iatt_fill (uuid_t gfid, struct iatt *buf);
+
+snap_dirent_t *
+svs_get_latest_snap_entry (xlator_t *this);
+
glfs_t *
-svs_intialise_snapshot_volume (xlator_t *this, const char *name);
+svs_get_latest_snapshot (xlator_t *this);
+
+glfs_t *
+svs_initialise_snapshot_volume (xlator_t *this, const char *name);
snap_dirent_t *
svs_get_snap_dirent (xlator_t *this, const char *name);
+int
+svs_mgmt_init (xlator_t *this);
+
#endif /* __SNAP_VIEW_H__ */
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 58e611d20dc..148acec8882 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -6259,6 +6259,7 @@ glusterd_snapshot_status_commit (dict_t *dict, char **op_errstr,
}
}
ret = 0;
+
out:
return ret;
}
@@ -7111,6 +7112,7 @@ glusterd_snapshot_postvalidate (dict_t *dict, int32_t op_ret, char **op_errstr,
"post-validation failed");
goto out;
}
+ glusterd_fetchsnap_notify (this);
break;
case GF_SNAP_OPTION_TYPE_DELETE:
if (op_ret) {
@@ -7128,6 +7130,7 @@ glusterd_snapshot_postvalidate (dict_t *dict, int32_t op_ret, char **op_errstr,
"update missed snaps list");
goto out;
}
+ glusterd_fetchsnap_notify (this);
break;
case GF_SNAP_OPTION_TYPE_RESTORE:
ret = glusterd_snapshot_update_snaps_post_validate (dict,
@@ -7146,6 +7149,7 @@ glusterd_snapshot_postvalidate (dict_t *dict, int32_t op_ret, char **op_errstr,
"perform snapshot restore post-op");
goto out;
}
+ glusterd_fetchsnap_notify (this);
break;
case GF_SNAP_OPTION_TYPE_ACTIVATE:
case GF_SNAP_OPTION_TYPE_DEACTIVATE:
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index 559b39d8d84..5a6e55ef39e 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -227,6 +227,7 @@ set:
out:
return 0;
}
+
int
glusterd_fetchspec_notify (xlator_t *this)
{
@@ -252,6 +253,42 @@ glusterd_fetchspec_notify (xlator_t *this)
}
int
+glusterd_fetchsnap_notify (xlator_t *this)
+{
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ rpc_transport_t *trans = NULL;
+
+ priv = this->private;
+
+ /*
+ * TODO: As of now, the handshake protocol has no way to identify
+ * individual rpc clients. So among the many glusterfs processes
+ * registered with glusterd, it is hard to identify one particular
+ * process (in this case, the snap daemon). Hence the callback
+ * notification is sent to all the transports in the transport list.
+ * Only those processes which have an rpc client registered for this
+ * callback will respond to the notification. Once identification
+ * of the rpc clients becomes possible, the section below can be
+ * changed to send the callback notification only to those rpc
+ * clients which have registered.
+ */
+ pthread_mutex_lock (&priv->xprt_lock);
+ {
+ list_for_each_entry (trans, &priv->xprt_list, list) {
+ rpcsvc_callback_submit (priv->rpc, trans,
+ &glusterd_cbk_prog,
+ GF_CBK_GET_SNAPS, NULL, 0);
+ }
+ }
+ pthread_mutex_unlock (&priv->xprt_lock);
+
+ ret = 0;
+
+ return ret;
+}
+
+int
glusterd_priv (xlator_t *this)
{
return 0;
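glusterd_fetchsnap_notify() above broadcasts GF_CBK_GET_SNAPS on every connected transport because clients cannot yet be identified individually; only processes that registered an actor for the procedure (as snapview-server does in svs_mgmt_init()) react. Below is a toy model of that broadcast-and-filter pattern; the types and names are illustrative, not the real rpc library API.

```c
#include <stdio.h>

/* Toy model: glusterd walks every transport and submits the callback;
 * a client acts only if it registered an actor for the procedure. */
typedef struct client {
        const char *name;
        int (*actor) (struct client *c); /* NULL if not registered */
} client_t;

static int
snapd_refresh (client_t *c)
{
        printf ("%s: refreshing snapshot list\n", c->name);
        return 0;
}

static void
broadcast_get_snaps (client_t *clients, int n)
{
        int i = 0;

        for (i = 0; i < n; i++) {
                if (clients[i].actor) /* registered for the callback? */
                        clients[i].actor (&clients[i]);
        }
}

int
main (void)
{
        client_t clients[] = {
                { "some-client", NULL },          /* ignores the cbk  */
                { "snapd",       snapd_refresh }, /* registered actor */
        };

        broadcast_get_snaps (clients, 2);

        return 0;
}
```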
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index f2f7a0a277c..9f26e7508ad 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -835,6 +835,9 @@ int
glusterd_fetchspec_notify (xlator_t *this);
int
+glusterd_fetchsnap_notify (xlator_t *this);
+
+int
glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
dict_t *volumes, int count);