Diffstat (limited to 'doc/examples')
-rw-r--r--  doc/examples/Makefile.am            8
-rw-r--r--  doc/examples/README                13
-rw-r--r--  doc/examples/filter.vol            23
-rw-r--r--  doc/examples/io-cache.vol          25
-rw-r--r--  doc/examples/io-threads.vol        21
-rw-r--r--  doc/examples/posix-locks.vol       20
-rw-r--r--  doc/examples/protocol-client.vol   17
-rw-r--r--  doc/examples/protocol-server.vol   25
-rw-r--r--  doc/examples/read-ahead.vol        22
-rw-r--r--  doc/examples/replicate.vol        119
-rw-r--r--  doc/examples/stripe.vol           121
-rw-r--r--  doc/examples/trace.vol             16
-rw-r--r--  doc/examples/trash.vol             20
-rw-r--r--  doc/examples/unify.vol            178
-rw-r--r--  doc/examples/write-behind.vol      26
15 files changed, 0 insertions, 654 deletions
diff --git a/doc/examples/Makefile.am b/doc/examples/Makefile.am
deleted file mode 100644
index b4c93f4c9..000000000
--- a/doc/examples/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-EXTRA = README unify.vol replicate.vol stripe.vol protocol-client.vol protocol-server.vol posix-locks.vol trash.vol write-behind.vol io-threads.vol io-cache.vol read-ahead.vol filter.vol trace.vol
-EXTRA_DIST = $(EXTRA)
-
-docdir = $(datadir)/doc/$(PACKAGE_NAME)
-Examplesdir = $(docdir)/examples
-Examples_DATA = $(EXTRA)
-
-CLEANFILES =
diff --git a/doc/examples/README b/doc/examples/README
deleted file mode 100644
index 4d472ac08..000000000
--- a/doc/examples/README
+++ /dev/null
@@ -1,13 +0,0 @@
-GlusterFS's translator feature is very flexible, and there are quite a lot of ways one
-can configure how their filesystem behaves.
-
-A volume specification is the way in which GlusterFS understands how it has to work,
-based on what is written there.
-
-Going through the following URLs will give you a better idea of all this.
-
-* http://www.gluster.org/docs/index.php/GlusterFS
-* http://www.gluster.org/docs/index.php/GlusterFS_Volume_Specification
-* http://www.gluster.org/docs/index.php/GlusterFS_Translators
-
-Mail any questions or suggestions to 'gluster-devel(at)nongnu.org'.
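The README itself shows no sample specification. For reference, a volume specification is simply a stack of 'volume ... end-volume' translator definitions. The sketch below pairs a server-side export with a client that mounts it, using only translators and options that appear in the example files removed here; the address 192.168.1.10 and the directory /home/export are placeholder values taken from those files.

    # server side: export /home/export as volume "brick" over TCP
    volume brick
      type storage/posix              # POSIX FS translator
      option directory /home/export   # Export this directory
    end-volume

    volume server
      type protocol/server
      option transport-type tcp
      subvolumes brick
      option auth.addr.brick.allow 192.168.*   # Allow access to "brick" volume
    end-volume

    # client side: attach to the remote "brick" volume
    volume client
      type protocol/client
      option transport-type tcp
      option remote-host 192.168.1.10 # IP address of the remote brick
      option remote-subvolume brick   # name of the remote volume
    end-volume

The client's 'remote-subvolume' must name the volume exported by the server ('brick' here); every other example in this directory builds on this same pattern by stacking performance or cluster translators above the client volume.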
diff --git a/doc/examples/filter.vol b/doc/examples/filter.vol
deleted file mode 100644
index ca5c59837..000000000
--- a/doc/examples/filter.vol
+++ /dev/null
@@ -1,23 +0,0 @@
-volume client
- type protocol/client
- option transport-type tcp # for TCP/IP transport
- option remote-host 192.168.1.10 # IP address of the remote brick
- option remote-subvolume brick # name of the remote volume
-end-volume
-
-## In a normal clustered storage setup, any of the cluster translators can come here.
-#
-# Definition of other clients
-#
-# Definition of cluster translator (may be unify, afr, or unify over afr)
-#
-
-### The 'filter' translator can be used on the client side (or on the server side, as needed). This translator makes all the translators (or, say, volumes) below it read-only. Hence, if one wants a read-only filesystem, using filter as the topmost volume makes it really fast, as the fops are answered from this level itself.
-
-volume filter-ro
- type features/filter
- option root-squashing enable
-# option completely-read-only yes
-# translate-uid 1-99=0
- subvolumes client
-end-volume
\ No newline at end of file
diff --git a/doc/examples/io-cache.vol b/doc/examples/io-cache.vol
deleted file mode 100644
index 5f3eca4c5..000000000
--- a/doc/examples/io-cache.vol
+++ /dev/null
@@ -1,25 +0,0 @@
-volume client
- type protocol/client
- option transport-type tcp # for TCP/IP transport
- option remote-host 192.168.1.10 # IP address of the remote brick
- option remote-subvolume brick # name of the remote volume
-end-volume
-
-## In a normal clustered storage setup, any of the cluster translators can come here.
-#
-# Definition of other clients
-#
-# Definition of cluster translator (may be unify, replicate, or unify over replicate)
-#
-
-### The 'io-cache' translator is best used on the client side when the filesystem has files which are not modified frequently but are read several times. For example, while compiling a kernel, the *.h files are read while compiling every *.c file; in such cases the io-cache translator comes in very handy, as it keeps whole file contents in its cache and serves reads from there.
-# One can also assign a cache priority to file patterns.
-
-volume ioc
- type performance/io-cache
- subvolumes client # In this example it is 'client'; you may have to change it according to your spec file.
- option page-size 1MB # 128KB is default
- option cache-size 64MB # 32MB is default
- option force-revalidate-timeout 5 # 1 second is default
- option priority *.html:2,*:1 # default is *:0
-end-volume
diff --git a/doc/examples/io-threads.vol b/doc/examples/io-threads.vol
deleted file mode 100644
index 9954724e1..000000000
--- a/doc/examples/io-threads.vol
+++ /dev/null
@@ -1,21 +0,0 @@
-
-volume brick
- type storage/posix # POSIX FS translator
- option directory /home/export # Export this directory
-end-volume
-
-### The 'io-threads' translator gives threaded behaviour to file I/O calls; all other fops keep their default behaviour. Loading this on the server side helps to reduce network contention (which would otherwise be perceived as a GlusterFS hang).
-# One can also load it on the client side, below write-behind, to reduce the latency involved in case of a slow network.
-volume iot
- type performance/io-threads
- subvolumes brick
- option thread-count 4 # default value is 1
-end-volume
-
-volume server
- type protocol/server
- subvolumes iot brick
- option transport-type tcp # For TCP/IP transport
- option auth.addr.brick.allow 192.168.* # Allow access to "brick" volume
- option auth.addr.iot.allow 192.168.* # Allow access to "iot" volume
-end-volume
diff --git a/doc/examples/posix-locks.vol b/doc/examples/posix-locks.vol
deleted file mode 100644
index b9c9e7a64..000000000
--- a/doc/examples/posix-locks.vol
+++ /dev/null
@@ -1,20 +0,0 @@
-
-volume brick
- type storage/posix # POSIX FS translator
- option directory /home/export # Export this directory
-end-volume
-
-### The 'posix-locks' feature should be added on the server side (with the posix volume as its subvolume) because that is where the actual files reside.
-volume p-locks
- type features/posix-locks
- subvolumes brick
- option mandatory on
-end-volume
-
-volume server
- type protocol/server
- subvolumes p-locks brick
- option transport-type tcp
- option auth.addr.brick.allow 192.168.* # Allow access to "brick" volume
- option auth.addr.p-locks.allow 192.168.* # Allow access to "p-locks" volume
-end-volume
diff --git a/doc/examples/protocol-client.vol b/doc/examples/protocol-client.vol
deleted file mode 100644
index 43c43e02d..000000000
--- a/doc/examples/protocol-client.vol
+++ /dev/null
@@ -1,17 +0,0 @@
-volume client
- type protocol/client
- option transport-type tcp # for TCP/IP transport
-# option transport-type ib-sdp # for Infiniband transport
- option remote-host 192.168.1.10 # IP address of the remote brick
-# option transport.socket.remote-port 24016
-
-# option transport-type ib-verbs # for Infiniband verbs transport
-# option transport.ib-verbs.work-request-send-size 1048576
-# option transport.ib-verbs.work-request-send-count 16
-# option transport.ib-verbs.work-request-recv-size 1048576
-# option transport.ib-verbs.work-request-recv-count 16
-# option transport.ib-verbs.remote-port 24016
-
- option remote-subvolume brick # name of the remote volume
-# option transport-timeout 30 # default value is 120 seconds
-end-volume
diff --git a/doc/examples/protocol-server.vol b/doc/examples/protocol-server.vol
deleted file mode 100644
index e8e4a4643..000000000
--- a/doc/examples/protocol-server.vol
+++ /dev/null
@@ -1,25 +0,0 @@
-
-### Export volume "brick" with the contents of "/home/export" directory.
-volume brick
- type storage/posix # POSIX FS translator
- option directory /home/export # Export this directory
-end-volume
-
-### Add network serving capability to above brick.
-volume server
- type protocol/server
- option transport-type tcp # For TCP/IP transport
-# option transport.socket.listen-port 24016
-
-# option transport-type ib-verbs # For Infiniband Verbs transport
-# option transport.ib-verbs.work-request-send-size 131072
-# option transport.ib-verbs.work-request-send-count 64
-# option transport.ib-verbs.work-request-recv-size 131072
-# option transport.ib-verbs.work-request-recv-count 64
-# option transport.ib-verbs.listen-port 24016
-
-# option bind-address 192.168.1.10 # Default is to listen on all interfaces
-# option client-volume-filename /etc/glusterfs/glusterfs-client.vol
- subvolumes brick
- option auth.addr.brick.allow 192.168.* # Allow access to "brick" volume
-end-volume
diff --git a/doc/examples/read-ahead.vol b/doc/examples/read-ahead.vol
deleted file mode 100644
index 3ce0d95ac..000000000
--- a/doc/examples/read-ahead.vol
+++ /dev/null
@@ -1,22 +0,0 @@
-volume client
- type protocol/client
- option transport-type tcp # for TCP/IP transport
- option remote-host 192.168.1.10 # IP address of the remote brick
- option remote-subvolume brick # name of the remote volume
-end-volume
-
-## In a normal clustered storage setup, any of the cluster translators can come here.
-#
-# Definition of other clients
-#
-# Definition of cluster translator (may be unify, replicate, or unify over replicate)
-#
-
-### The 'read-ahead' translator is best utilized on the client side, as it prefetches file contents once the first read() call is issued.
-volume ra
- type performance/read-ahead
- subvolumes client # In this example it is 'client'; you may have to change it according to your spec file.
- option page-size 1MB # default is 256KB
- option page-count 4 # default is 2
- option force-atime-update no # default is 'no'
-end-volume
diff --git a/doc/examples/replicate.vol b/doc/examples/replicate.vol
deleted file mode 100644
index 333ba7de1..000000000
--- a/doc/examples/replicate.vol
+++ /dev/null
@@ -1,119 +0,0 @@
-### 'NOTE'
-# This file has both the server spec and the client spec, to give an understanding of replicate's spec file. Hence it can't be used as-is as a GlusterFS spec file.
-# One needs to separate out the server spec and the client spec to get it working.
-
-#=========================================================================
-
-# **** server1 spec file ****
-
-### Export volume "brick" with the contents of "/home/export" directory.
-volume posix1
- type storage/posix # POSIX FS translator
- option directory /home/export1 # Export this directory
-end-volume
-
-### Add POSIX record locking support to the storage brick
-volume brick1
- type features/posix-locks
- option mandatory on # enables mandatory locking on all files
- subvolumes posix1
-end-volume
-
-### Add network serving capability to above brick.
-volume server
- type protocol/server
- option transport-type tcp # For TCP/IP transport
- option transport.socket.listen-port 24016
-# option client-volume-filename /etc/glusterfs/glusterfs-client.vol
- subvolumes brick1
- option auth.addr.brick1.allow * # Allow access to "brick1" volume
-end-volume
-
-
-#=========================================================================
-
-# **** server2 spec file ****
-volume posix2
- type storage/posix # POSIX FS translator
- option directory /home/export2 # Export this directory
-end-volume
-
-### Add POSIX record locking support to the storage brick
-volume brick2
- type features/posix-locks
- option mandatory on # enables mandatory locking on all files
- subvolumes posix2
-end-volume
-
-### Add network serving capability to above brick.
-volume server
- type protocol/server
- option transport-type tcp # For TCP/IP transport
- option transport.socket.listen-port 24017
- subvolumes brick2
- option auth.addr.brick2.allow * # Allow access to "brick2" volume
-end-volume
-
-
-#=========================================================================
-
-# **** server3 spec file ****
-
-volume posix3
- type storage/posix # POSIX FS translator
- option directory /home/export3 # Export this directory
-end-volume
-
-### Add POSIX record locking support to the storage brick
-volume brick3
- type features/posix-locks
- option mandatory on # enables mandatory locking on all files
- subvolumes posix3
-end-volume
-
-### Add network serving capability to above brick.
-volume server
- type protocol/server
- option transport-type tcp # For TCP/IP transport
- option transport.socket.listen-port 24018
- subvolumes brick3
- option auth.addr.brick3.allow * # Allow access to "brick3" volume
-end-volume
-
-
-#=========================================================================
-
-# **** Clustered Client config file ****
-
-### Add client feature and attach to remote subvolume of server1
-volume client1
- type protocol/client
- option transport-type tcp # for TCP/IP transport
- option remote-host 127.0.0.1 # IP address of the remote brick
- option transport.socket.remote-port 24016
- option remote-subvolume brick1 # name of the remote volume
-end-volume
-
-### Add client feature and attach to remote subvolume of server2
-volume client2
- type protocol/client
- option transport-type tcp # for TCP/IP transport
- option remote-host 127.0.0.1 # IP address of the remote brick
- option transport.socket.remote-port 24017
- option remote-subvolume brick2 # name of the remote volume
-end-volume
-
-volume client3
- type protocol/client
- option transport-type tcp # for TCP/IP transport
- option remote-host 127.0.0.1 # IP address of the remote brick
- option transport.socket.remote-port 24018
- option remote-subvolume brick3 # name of the remote volume
-end-volume
-
-## Add replicate feature.
-volume replicate
- type cluster/replicate
- subvolumes client1 client2 client3
-end-volume
-
diff --git a/doc/examples/stripe.vol b/doc/examples/stripe.vol
deleted file mode 100644
index 6055b66b9..000000000
--- a/doc/examples/stripe.vol
+++ /dev/null
@@ -1,121 +0,0 @@
-
-### 'NOTE'
-# This file has both the server spec and the client spec, to give an understanding of stripe's spec file. Hence it can't be used as-is as a GlusterFS spec file.
-# One needs to separate out the server spec and the client spec to get it working.
-
-#=========================================================================
-
-# **** server1 spec file ****
-
-### Export volume "brick" with the contents of "/home/export" directory.
-volume posix1
- type storage/posix # POSIX FS translator
- option directory /home/export1 # Export this directory
-end-volume
-
-### Add POSIX record locking support to the storage brick
-volume brick1
- type features/posix-locks
- option mandatory on # enables mandatory locking on all files
- subvolumes posix1
-end-volume
-
-### Add network serving capability to above brick.
-volume server
- type protocol/server
- option transport-type tcp # For TCP/IP transport
- option transport.socket.listen-port 24016
-# option client-volume-filename /etc/glusterfs/glusterfs-client.vol
- subvolumes brick1
- option auth.addr.brick1.allow * # Allow access to "brick1" volume
-end-volume
-
-
-#=========================================================================
-
-# **** server2 spec file ****
-volume posix2
- type storage/posix # POSIX FS translator
- option directory /home/export2 # Export this directory
-end-volume
-
-### Add POSIX record locking support to the storage brick
-volume brick2
- type features/posix-locks
- option mandatory on # enables mandatory locking on all files
- subvolumes posix2
-end-volume
-
-### Add network serving capability to above brick.
-volume server
- type protocol/server
- option transport-type tcp # For TCP/IP transport
- option transport.socket.listen-port 24017
- subvolumes brick2
- option auth.addr.brick2.allow * # Allow access to "brick2" volume
-end-volume
-
-
-#=========================================================================
-
-# **** server3 spec file ****
-
-volume posix3
- type storage/posix # POSIX FS translator
- option directory /home/export3 # Export this directory
-end-volume
-
-### Add POSIX record locking support to the storage brick
-volume brick3
- type features/posix-locks
- option mandatory on # enables mandatory locking on all files
- subvolumes posix3
-end-volume
-
-### Add network serving capability to above brick.
-volume server
- type protocol/server
- option transport-type tcp # For TCP/IP transport
- option transport.socket.listen-port 24018
- subvolumes brick3
- option auth.addr.brick3.allow * # Allow access to "brick3" volume
-end-volume
-
-
-#=========================================================================
-
-# **** Clustered Client config file ****
-
-### Add client feature and attach to remote subvolume of server1
-volume client1
- type protocol/client
- option transport-type tcp # for TCP/IP transport
- option remote-host 127.0.0.1 # IP address of the remote brick
- option transport.socket.remote-port 24016
- option remote-subvolume brick1 # name of the remote volume
-end-volume
-
-### Add client feature and attach to remote subvolume of server2
-volume client2
- type protocol/client
- option transport-type tcp # for TCP/IP transport
- option remote-host 127.0.0.1 # IP address of the remote brick
- option transport.socket.remote-port 24017
- option remote-subvolume brick2 # name of the remote volume
-end-volume
-
-volume client3
- type protocol/client
- option transport-type tcp # for TCP/IP transport
- option remote-host 127.0.0.1 # IP address of the remote brick
- option transport.socket.remote-port 24018
- option remote-subvolume brick3 # name of the remote volume
-end-volume
-
-## Add Stripe Feature.
-volume stripe
- type cluster/stripe
- subvolumes client1 client2 client3
- option block-size 1MB
-end-volume
-
diff --git a/doc/examples/trace.vol b/doc/examples/trace.vol
deleted file mode 100644
index 3f4864db4..000000000
--- a/doc/examples/trace.vol
+++ /dev/null
@@ -1,16 +0,0 @@
-volume client
- type protocol/client
- option transport-type tcp # for TCP/IP transport
- option remote-host 192.168.1.10 # IP address of the remote brick
- option remote-subvolume brick # name of the remote volume
-end-volume
-
-### The 'trace' translator is a very handy debugging tool for GlusterFS, as it can be loaded between any two volumes without changing the behaviour of the filesystem.
-# On the client side it can be the topmost volume in the spec (as here), to understand what calls are made on the FUSE filesystem when the mountpoint is accessed.
-
-volume trace
- type debug/trace
- subvolumes client
-end-volume
-
-# 'NOTE:' With the 'debug/trace' translator loaded, the filesystem will be very slow, as each and every call is logged to the log file.
diff --git a/doc/examples/trash.vol b/doc/examples/trash.vol
deleted file mode 100644
index 16e71be32..000000000
--- a/doc/examples/trash.vol
+++ /dev/null
@@ -1,20 +0,0 @@
-
-volume brick
- type storage/posix # POSIX FS translator
- option directory /home/export # Export this directory
-end-volume
-
-### The 'trash' translator is best used on the server side, as it just renames a deleted file into 'trash-dir'; note that it turns one unlink call into 4 separate fops.
-volume trashcan
- type features/trash
- subvolumes brick
- option trash-dir /.trashcan
-end-volume
-
-volume server
- type protocol/server
- subvolumes trashcan brick
- option transport-type tcp # For TCP/IP transport
- option auth.addr.brick.allow 192.168.* # Allow access to "brick" volume
- option auth.addr.trashcan.allow 192.168.* # Allow access to "trashcan" volume
-end-volume
diff --git a/doc/examples/unify.vol b/doc/examples/unify.vol
deleted file mode 100644
index 3fb7e8320..000000000
--- a/doc/examples/unify.vol
+++ /dev/null
@@ -1,178 +0,0 @@
-### 'NOTE'
-# This file has both the server spec and the client spec, to give an understanding of unify's spec file. Hence it can't be used as-is as a GlusterFS spec file.
-# One needs to separate out the server spec and the client spec to get it working.
-
-
-#=========================================================================
-
-# **** server1 spec file ****
-
-### Export volume "brick" with the contents of "/home/export" directory.
-volume posix1
- type storage/posix # POSIX FS translator
- option directory /home/export1 # Export this directory
-end-volume
-
-### Add POSIX record locking support to the storage brick
-volume brick1
- type features/posix-locks
- option mandatory on # enables mandatory locking on all files
- subvolumes posix1
-end-volume
-
-### Add network serving capability to above brick.
-volume server
- type protocol/server
- option transport-type tcp # For TCP/IP transport
- option transport.socket.listen-port 24016
-# option client-volume-filename /etc/glusterfs/glusterfs-client.vol
- subvolumes brick1
- option auth.addr.brick1.allow * # Allow access to "brick1" volume
-end-volume
-
-
-#=========================================================================
-
-# **** server2 spec file ****
-volume posix2
- type storage/posix # POSIX FS translator
- option directory /home/export2 # Export this directory
-end-volume
-
-### Add POSIX record locking support to the storage brick
-volume brick2
- type features/posix-locks
- option mandatory on # enables mandatory locking on all files
- subvolumes posix2
-end-volume
-
-### Add network serving capability to above brick.
-volume server
- type protocol/server
- option transport-type tcp # For TCP/IP transport
- option transport.socket.listen-port 24017
- subvolumes brick2
- option auth.addr.brick2.allow * # Allow access to "brick2" volume
-end-volume
-
-
-#=========================================================================
-
-# **** server3 spec file ****
-
-volume posix3
- type storage/posix # POSIX FS translator
- option directory /home/export3 # Export this directory
-end-volume
-
-### Add POSIX record locking support to the storage brick
-volume brick3
- type features/posix-locks
- option mandatory on # enables mandatory locking on all files
- subvolumes posix3
-end-volume
-
-### Add network serving capability to above brick.
-volume server
- type protocol/server
- option transport-type tcp # For TCP/IP transport
- option transport.socket.listen-port 24018
- subvolumes brick3
- option auth.addr.brick3.allow * # Allow access to "brick3" volume
-end-volume
-
-#=========================================================================
-
-# *** server for namespace ***
-### Export volume "brick" with the contents of "/home/export" directory.
-volume brick-ns
- type storage/posix # POSIX FS translator
- option directory /home/export-ns # Export this directory
-end-volume
-
-volume server
- type protocol/server
- option transport-type tcp # For TCP/IP transport
- option transport.socket.listen-port 24019
- subvolumes brick-ns
- option auth.addr.brick-ns.allow * # Allow access to "brick-ns" volume
-end-volume
-
-
-#=========================================================================
-
-# **** Clustered Client config file ****
-
-### Add client feature and attach to remote subvolume of server1
-volume client1
- type protocol/client
- option transport-type tcp # for TCP/IP transport
-# option transport-type ib-sdp # for Infiniband transport
- option remote-host 127.0.0.1 # IP address of the remote brick
- option transport.socket.remote-port 24016
- option remote-subvolume brick1 # name of the remote volume
-end-volume
-
-### Add client feature and attach to remote subvolume of server2
-volume client2
- type protocol/client
- option transport-type tcp # for TCP/IP transport
-# option transport-type ib-sdp # for Infiniband transport
- option remote-host 127.0.0.1 # IP address of the remote brick
- option transport.socket.remote-port 24017
- option remote-subvolume brick2 # name of the remote volume
-end-volume
-
-volume client3
- type protocol/client
- option transport-type tcp # for TCP/IP transport
-# option transport-type ib-sdp # for Infiniband transport
- option remote-host 127.0.0.1 # IP address of the remote brick
- option transport.socket.remote-port 24018
- option remote-subvolume brick3 # name of the remote volume
-end-volume
-
-
-volume client-ns
- type protocol/client
- option transport-type tcp # for TCP/IP transport
-# option transport-type ib-sdp # for Infiniband transport
- option remote-host 127.0.0.1 # IP address of the remote brick
- option transport.socket.remote-port 24019
- option remote-subvolume brick-ns # name of the remote volume
-end-volume
-
-### Add unify feature to cluster the servers. Associate an
-### appropriate scheduler that matches your I/O demand.
-volume bricks
- type cluster/unify
- option namespace client-ns # this will not be a storage child of unify.
- subvolumes client1 client2 client3
-### ** ALU Scheduler Option **
- option self-heal background # other values: 'foreground', 'off'; default is foreground
- option scheduler alu
- option alu.limits.min-free-disk 5% #%
- option alu.limits.max-open-files 10000
- option alu.order disk-usage:read-usage:write-usage:open-files-usage:disk-speed-usage
- option alu.disk-usage.entry-threshold 2GB
- option alu.disk-usage.exit-threshold 128MB
- option alu.open-files-usage.entry-threshold 1024
- option alu.open-files-usage.exit-threshold 32
- option alu.read-usage.entry-threshold 20 #%
- option alu.read-usage.exit-threshold 4 #%
- option alu.write-usage.entry-threshold 20 #%
- option alu.write-usage.exit-threshold 4 #%
- option alu.disk-speed-usage.entry-threshold 0 # DO NOT SET IT. SPEED IS CONSTANT!!!.
- option alu.disk-speed-usage.exit-threshold 0 # DO NOT SET IT. SPEED IS CONSTANT!!!.
- option alu.stat-refresh.interval 10sec
- option alu.stat-refresh.num-file-create 10
-### ** Random Scheduler **
-# option scheduler random
-### ** NUFA Scheduler **
-# option scheduler nufa
-# option nufa.local-volume-name posix1
-### ** Round Robin (RR) Scheduler **
-# option scheduler rr
-# option rr.limits.min-free-disk 5% #%
-end-volume
-
diff --git a/doc/examples/write-behind.vol b/doc/examples/write-behind.vol
deleted file mode 100644
index 9c6bae11c..000000000
--- a/doc/examples/write-behind.vol
+++ /dev/null
@@ -1,26 +0,0 @@
-volume client
- type protocol/client
- option transport-type tcp # for TCP/IP transport
- option remote-host 192.168.1.10 # IP address of the remote brick
- option remote-subvolume brick # name of the remote volume
-end-volume
-
-## In a normal clustered storage setup, any of the cluster translators can come here.
-#
-# Definition of other clients
-#
-# Definition of cluster translator (may be unify, replicate, or unify over replicate)
-#
-
-
-### The 'write-behind' translator is a performance booster for write operations. It is best used on the client side, as its main intention is to reduce the network latency incurred by each write operation.
-
-volume wb
- type performance/write-behind
- subvolumes client # In this example it is 'client'; you may have to change it according to your spec file.
- option flush-behind on # default value is 'off'
- option window-size 2MB
- option aggregate-size 1MB # default value is 0
- option enable_O_SYNC no # default is no
- option disable-for-first-nbytes 128KB #default is 1
-end-volume