authorHumble Devassy Chirammal <hchiramm@redhat.com>2015-06-08 20:35:38 +0530
committerHumble Devassy Chirammal <hchiramm@redhat.com>2015-06-09 14:52:10 +0530
commit5be489089407fc410c7157e39c73c6eb910696b8 (patch)
tree467354ebc4f56e930a0b7d639bd49264b004ee9a
parenta2a370db6db80e9365d0777701786ce706957f42 (diff)
doc: Remove doc directories
At present, gluster documentation is available at
http://gluster.readthedocs.org/en/latest/ and the source project is
https://github.com/gluster/glusterdocs. From here onwards, documentation
patches have to be sent against the glusterdocs project in the GitHub repo.

For more details refer:
http://www.gluster.org/pipermail/gluster-users/2015-May/022065.html

Change-Id: I6d7d20d34ca4ee36356f0dc67204f28350dbf94c
BUG: 1206539
Signed-off-by: Humble Devassy Chirammal <hchiramm@redhat.com>
-rw-r--r--  doc/README  3
-rw-r--r--  doc/admin-guide/en-US/images/640px-GlusterFS_Architecture.png  bin 97477 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Distributed_Replicated_Volume.png  bin 62929 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Distributed_Striped_Replicated_Volume.png  bin 57210 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Distributed_Striped_Volume.png  bin 53781 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Distributed_Volume.png  bin 47211 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Geo-Rep03_Internet.png  bin 131824 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Geo-Rep04_Cascading.png  bin 187341 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Geo-Rep_LAN.png  bin 163417 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Geo-Rep_WAN.png  bin 96291 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/GlusterFS_Architecture.png  bin 133597 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Hadoop_Architecture.png  bin 43815 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Replicated_Volume.png  bin 44077 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Striped_Replicated_Volume.png  bin 62113 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/Striped_Volume.png  bin 43316 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/UFO_Architecture.png  bin 72139 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/VSA_Architecture.png  bin 38875 -> 0 bytes
-rw-r--r--  doc/admin-guide/en-US/images/icon.svg  19
-rw-r--r--  doc/admin-guide/en-US/markdown/.gitignore  2
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_ACLs.md  216
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_Hadoop.md  31
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_console.md  50
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_directory_Quota.md  219
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_distributed_geo_rep.md  264
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_geo-replication.md  681
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_logging.md  56
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_managing_snapshots.md  316
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_managing_volumes.md  770
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_monitoring_workload.md  893
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_object_storage.md  26
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_puppet.md  499
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_rdma_transport.md  70
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_setting_volumes.md  674
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_settingup_clients.md  600
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_ssl.md  128
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_start_stop_daemon.md  58
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_storage_pools.md  91
-rw-r--r--  doc/admin-guide/en-US/markdown/admin_troubleshooting.md  495
-rw-r--r--  doc/admin-guide/en-US/markdown/did-you-know.md  36
-rw-r--r--  doc/admin-guide/en-US/markdown/glossary.md  300
-rw-r--r--  doc/admin-guide/en-US/markdown/glusterfs_introduction.md  63
-rwxr-xr-x  doc/admin-guide/en-US/markdown/pdfgen.sh  16
-rw-r--r--  doc/debugging/gfid-to-path.md  73
-rw-r--r--  doc/debugging/split-brain.md  251
-rw-r--r--  doc/debugging/statedump.md  389
-rw-r--r--  doc/features/afr-arbiter-volumes.md  53
-rw-r--r--  doc/features/afr-statistics.md  142
-rw-r--r--  doc/features/afr-v1.md  340
-rw-r--r--  doc/features/bit-rot/00-INDEX  8
-rw-r--r--  doc/features/bit-rot/bitrot-docs.txt  5
-rw-r--r--  doc/features/bit-rot/memory-usage.txt  48
-rw-r--r--  doc/features/bit-rot/object-versioning.txt  236
-rw-r--r--  doc/features/brick-failure-detection.md  67
-rw-r--r--  doc/features/dht.md  223
-rw-r--r--  doc/features/file-snapshot.md  91
-rw-r--r--  doc/features/geo-replication/distributed-geo-rep.md  71
-rw-r--r--  doc/features/geo-replication/libgfchangelog.md  119
-rw-r--r--  doc/features/gfid-access.md  73
-rw-r--r--  doc/features/glusterfs_nfs-ganesha_integration.md  123
-rw-r--r--  doc/features/heal-info-and-split-brain-resolution.md  459
-rw-r--r--  doc/features/libgfapi.md  381
-rw-r--r--  doc/features/mount_gluster_volume_using_pnfs.md  56
-rw-r--r--  doc/features/nufa.md  20
-rw-r--r--  doc/features/ovirt-integration.md  106
-rw-r--r--  doc/features/qemu-integration.md  231
-rw-r--r--  doc/features/quota/quota-object-count.md  47
-rw-r--r--  doc/features/quota/quota-scalability.md  52
-rw-r--r--  doc/features/rdmacm.md  26
-rw-r--r--  doc/features/readdir-ahead.md  14
-rw-r--r--  doc/features/rebalance.md  74
-rw-r--r--  doc/features/server-quorum.md  44
-rw-r--r--  doc/features/shard.md  68
-rw-r--r--  doc/features/tier/tier.md  168
-rw-r--r--  doc/features/trash.md  80
-rw-r--r--  doc/features/upcall.md  33
-rw-r--r--  doc/features/worm.md  75
-rw-r--r--  doc/features/zerofill.md  26
-rw-r--r--  doc/legacy/Makefile.am  3
-rw-r--r--  doc/legacy/advanced-stripe.odg  bin 12648 -> 0 bytes
-rw-r--r--  doc/legacy/advanced-stripe.pdf  bin 13382 -> 0 bytes
-rw-r--r--  doc/legacy/authentication.txt  112
-rw-r--r--  doc/legacy/booster.txt  54
-rw-r--r--  doc/legacy/colonO-icon.jpg  bin 779 -> 0 bytes
-rw-r--r--  doc/legacy/errno.list.bsd.txt  376
-rw-r--r--  doc/legacy/errno.list.linux.txt  1586
-rw-r--r--  doc/legacy/errno.list.macosx.txt  1513
-rw-r--r--  doc/legacy/errno.list.solaris.txt  206
-rw-r--r--  doc/legacy/fdl.texi  454
-rw-r--r--  doc/legacy/fuse.odg  bin 13190 -> 0 bytes
-rw-r--r--  doc/legacy/fuse.pdf  bin 14948 -> 0 bytes
-rw-r--r--  doc/legacy/get_put_api_using_xattr.txt  22
-rw-r--r--  doc/legacy/ha.odg  bin 37290 -> 0 bytes
-rw-r--r--  doc/legacy/ha.pdf  bin 19403 -> 0 bytes
-rw-r--r--  doc/legacy/hacker-guide/Makefile.am  8
-rw-r--r--  doc/legacy/hacker-guide/call-stub.txt  1033
-rw-r--r--  doc/legacy/hacker-guide/hacker-guide.tex  309
-rw-r--r--  doc/legacy/hacker-guide/replicate.txt  206
-rw-r--r--  doc/legacy/handling-options.txt  13
-rw-r--r--  doc/legacy/mac-related-xattrs.txt  21
-rw-r--r--  doc/legacy/porting_guide.txt  45
-rw-r--r--  doc/legacy/replicate.lyx  797
-rw-r--r--  doc/legacy/replicate.pdf  bin 109057 -> 0 bytes
-rw-r--r--  doc/legacy/solaris-related-xattrs.txt  44
-rw-r--r--  doc/legacy/stat-prefetch-design.txt  154
-rw-r--r--  doc/legacy/stripe.odg  bin 10188 -> 0 bytes
-rw-r--r--  doc/legacy/stripe.pdf  bin 11941 -> 0 bytes
-rw-r--r--  doc/legacy/translator-options.txt  224
-rw-r--r--  doc/legacy/unify.odg  bin 12955 -> 0 bytes
-rw-r--r--  doc/legacy/unify.pdf  bin 18969 -> 0 bytes
-rw-r--r--  doc/legacy/user-guide.info  2697
-rw-r--r--  doc/legacy/user-guide.pdf  bin 353986 -> 0 bytes
-rw-r--r--  doc/legacy/user-guide.texi  2246
-rw-r--r--  doc/legacy/xlator.odg  bin 12169 -> 0 bytes
-rw-r--r--  doc/legacy/xlator.pdf  bin 14358 -> 0 bytes
-rw-r--r--  doc/tools/gfind_missing_files.md  67
-rw-r--r--  doc/tools/glusterfind.md  148
-rw-r--r--  doc/upgrade-guide/upgrade_to_3.5.md  2
-rw-r--r--  doc/upgrade-guide/upgrade_to_3.6.md  2
-rw-r--r--  doc/xlators/meta.md  206
119 files changed, 3 insertions, 23393 deletions
diff --git a/doc/README b/doc/README
new file mode 100644
index 00000000000..42fca27323d
--- /dev/null
+++ b/doc/README
@@ -0,0 +1,3 @@
+Gluster Documentation : http://gluster.readthedocs.org/en/latest/
+
+Send documentation patches to : https://github.com/gluster/glusterdocs/
diff --git a/doc/admin-guide/en-US/images/640px-GlusterFS_Architecture.png b/doc/admin-guide/en-US/images/640px-GlusterFS_Architecture.png
deleted file mode 100644
index 95f89ec8286..00000000000
--- a/doc/admin-guide/en-US/images/640px-GlusterFS_Architecture.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Distributed_Replicated_Volume.png b/doc/admin-guide/en-US/images/Distributed_Replicated_Volume.png
deleted file mode 100644
index 22daecdb903..00000000000
--- a/doc/admin-guide/en-US/images/Distributed_Replicated_Volume.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Distributed_Striped_Replicated_Volume.png b/doc/admin-guide/en-US/images/Distributed_Striped_Replicated_Volume.png
deleted file mode 100644
index d286fa99e94..00000000000
--- a/doc/admin-guide/en-US/images/Distributed_Striped_Replicated_Volume.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Distributed_Striped_Volume.png b/doc/admin-guide/en-US/images/Distributed_Striped_Volume.png
deleted file mode 100644
index 752fa982fa6..00000000000
--- a/doc/admin-guide/en-US/images/Distributed_Striped_Volume.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Distributed_Volume.png b/doc/admin-guide/en-US/images/Distributed_Volume.png
deleted file mode 100644
index 4386ca935b9..00000000000
--- a/doc/admin-guide/en-US/images/Distributed_Volume.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Geo-Rep03_Internet.png b/doc/admin-guide/en-US/images/Geo-Rep03_Internet.png
deleted file mode 100644
index 3cd0eaded02..00000000000
--- a/doc/admin-guide/en-US/images/Geo-Rep03_Internet.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Geo-Rep04_Cascading.png b/doc/admin-guide/en-US/images/Geo-Rep04_Cascading.png
deleted file mode 100644
index 54bf9f05cff..00000000000
--- a/doc/admin-guide/en-US/images/Geo-Rep04_Cascading.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Geo-Rep_LAN.png b/doc/admin-guide/en-US/images/Geo-Rep_LAN.png
deleted file mode 100644
index a74f6dbb50a..00000000000
--- a/doc/admin-guide/en-US/images/Geo-Rep_LAN.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Geo-Rep_WAN.png b/doc/admin-guide/en-US/images/Geo-Rep_WAN.png
deleted file mode 100644
index d72d72768bc..00000000000
--- a/doc/admin-guide/en-US/images/Geo-Rep_WAN.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/GlusterFS_Architecture.png b/doc/admin-guide/en-US/images/GlusterFS_Architecture.png
deleted file mode 100644
index b506db1f4e7..00000000000
--- a/doc/admin-guide/en-US/images/GlusterFS_Architecture.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Hadoop_Architecture.png b/doc/admin-guide/en-US/images/Hadoop_Architecture.png
deleted file mode 100644
index 8725bd330bb..00000000000
--- a/doc/admin-guide/en-US/images/Hadoop_Architecture.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Replicated_Volume.png b/doc/admin-guide/en-US/images/Replicated_Volume.png
deleted file mode 100644
index 135a63f345a..00000000000
--- a/doc/admin-guide/en-US/images/Replicated_Volume.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Striped_Replicated_Volume.png b/doc/admin-guide/en-US/images/Striped_Replicated_Volume.png
deleted file mode 100644
index adf1f8465eb..00000000000
--- a/doc/admin-guide/en-US/images/Striped_Replicated_Volume.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/Striped_Volume.png b/doc/admin-guide/en-US/images/Striped_Volume.png
deleted file mode 100644
index 63a84b242ab..00000000000
--- a/doc/admin-guide/en-US/images/Striped_Volume.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/UFO_Architecture.png b/doc/admin-guide/en-US/images/UFO_Architecture.png
deleted file mode 100644
index be85d7b2825..00000000000
--- a/doc/admin-guide/en-US/images/UFO_Architecture.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/VSA_Architecture.png b/doc/admin-guide/en-US/images/VSA_Architecture.png
deleted file mode 100644
index c3ab80cf3e8..00000000000
--- a/doc/admin-guide/en-US/images/VSA_Architecture.png
+++ /dev/null
Binary files differ
diff --git a/doc/admin-guide/en-US/images/icon.svg b/doc/admin-guide/en-US/images/icon.svg
deleted file mode 100644
index b2f16d0f61d..00000000000
--- a/doc/admin-guide/en-US/images/icon.svg
+++ /dev/null
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.0" width="32" height="32" id="svg3017">
- <defs id="defs3019">
- <linearGradient id="linearGradient2381">
- <stop id="stop2383" style="stop-color:#ffffff;stop-opacity:1" offset="0"/>
- <stop id="stop2385" style="stop-color:#ffffff;stop-opacity:0" offset="1"/>
- </linearGradient>
- <linearGradient x1="296.4996" y1="188.81061" x2="317.32471" y2="209.69398" id="linearGradient2371" xlink:href="#linearGradient2381" gradientUnits="userSpaceOnUse" gradientTransform="matrix(0.90776,0,0,0.90776,24.35648,49.24131)"/>
- </defs>
- <g transform="matrix(0.437808,-0.437808,0.437808,0.437808,-220.8237,43.55311)" id="g5089">
- <path d="m 8.4382985,-6.28125 c -0.6073916,0 -4.3132985,5.94886271 -4.3132985,8.25 l 0,26.71875 c 0,0.846384 0.5818159,1.125 1.15625,1.125 l 25.5625,0 c 0.632342,0 1.125001,-0.492658 1.125,-1.125 l 0,-5.21875 0.28125,0 c 0.49684,0 0.906249,-0.409411 0.90625,-0.90625 l 0,-27.9375 c 0,-0.4968398 -0.40941,-0.90625 -0.90625,-0.90625 l -23.8117015,0 z" transform="translate(282.8327,227.1903)" id="path5091" style="fill:#5c5c4f;stroke:#000000;stroke-width:3.23021388;stroke-miterlimit:4;stroke-dasharray:none"/>
- <rect width="27.85074" height="29.369793" rx="1.1414107" ry="1.1414107" x="286.96509" y="227.63805" id="rect5093" style="fill:#032c87"/>
- <path d="m 288.43262,225.43675 25.2418,0 0,29.3698 -26.37615,0.0241 1.13435,-29.39394 z" id="rect5095" style="fill:#ffffff"/>
- <path d="m 302.44536,251.73726 c 1.38691,7.85917 -0.69311,11.28365 -0.69311,11.28365 2.24384,-1.60762 3.96426,-3.47694 4.90522,-5.736 0.96708,2.19264 1.83294,4.42866 4.27443,5.98941 0,0 -1.59504,-7.2004 -1.71143,-11.53706 l -6.77511,0 z" id="path5097" style="fill:#a70000;fill-opacity:1;stroke-width:2"/>
- <rect width="25.241802" height="29.736675" rx="0.89682275" ry="0.89682275" x="290.73544" y="220.92249" id="rect5099" style="fill:#809cc9"/>
- <path d="m 576.47347,725.93939 6.37084,0.41502 0.4069,29.51809 c -1.89202,-1.31785 -6.85427,-3.7608 -8.26232,-1.68101 l 0,-26.76752 c 0,-0.82246 0.66212,-1.48458 1.48458,-1.48458 z" transform="matrix(0.499065,-0.866565,0,1,0,0)" id="rect5101" style="fill:#4573b3;fill-opacity:1"/>
- <path d="m 293.2599,221.89363 20.73918,0 c 0.45101,0 0.8141,0.3631 0.8141,0.81411 0.21547,6.32836 -19.36824,21.7635 -22.36739,17.59717 l 0,-17.59717 c 0,-0.45101 0.3631,-0.81411 0.81411,-0.81411 z" id="path5103" style="opacity:0.65536726;fill:url(#linearGradient2371);fill-opacity:1"/>
- </g>
-</svg>
diff --git a/doc/admin-guide/en-US/markdown/.gitignore b/doc/admin-guide/en-US/markdown/.gitignore
deleted file mode 100644
index 9eed460045f..00000000000
--- a/doc/admin-guide/en-US/markdown/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-output/*.pdf
-
diff --git a/doc/admin-guide/en-US/markdown/admin_ACLs.md b/doc/admin-guide/en-US/markdown/admin_ACLs.md
deleted file mode 100644
index ebae7f71887..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_ACLs.md
+++ /dev/null
@@ -1,216 +0,0 @@
-#POSIX Access Control Lists
-
-POSIX Access Control Lists (ACLs) allow you to assign different
-permissions to different users or groups even though they do not
-correspond to the original owner or the owning group.
-
-For example: User john creates a file but does not want to allow anyone
-to do anything with this file, except another user, antony (even though
-there are other users that belong to the group john).
-
-This means, in addition to the file owner, the file group, and others,
-additional users and groups can be granted or denied access by using
-POSIX ACLs.
-
-##Activating POSIX ACLs Support
-
-To use POSIX ACLs for a file or directory, the partition of the file or
-directory must be mounted with POSIX ACLs support.
-
-###Activating POSIX ACLs Support on Server
-
-To mount the backend export directories for POSIX ACLs support, use the
-following command:
-
-`# mount -o acl <device> <mount-point>`
-
-For example:
-
-`# mount -o acl /dev/sda1 /export1 `
-
-Alternatively, if the partition is listed in the /etc/fstab file, add
-the following entry for the partition to include the POSIX ACLs option:
-
-`LABEL=/work /export1 ext3 rw,acl 1 4 `
-
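-After updating /etc/fstab, the ACL option can be applied without a
-reboot by remounting the partition (a minimal sketch, using the example
-mount point above):
-
-`# mount -o remount,acl /export1 `
-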
-###Activating POSIX ACLs Support on Client
-
-To mount the glusterfs volumes for POSIX ACLs support, use the following
-command:
-
-`# mount -t glusterfs -o acl <server>:<volname> <mount-point>`
-
-For example:
-
-`# mount -t glusterfs -o acl 198.192.198.234:glustervolume /mnt/gluster`
-
-##Setting POSIX ACLs
-
-You can set two types of POSIX ACLs: access ACLs and default ACLs. You
-can use access ACLs to grant permissions for a specific file or
-directory. You can use default ACLs only on a directory; if a file
-inside that directory does not have an ACL, it inherits the permissions
-of the default ACLs of the directory.
-
-You can set ACLs per user, per group, for users not in the user
-group for the file, and via the effective rights mask.
-
-##Setting Access ACLs
-
-You can apply access ACLs to grant permission for both files and
-directories.
-
-**To set or modify Access ACLs**
-
-You can set or modify access ACLs using the following command:
-
-`# setfacl -m <entry_type>:<permissions> <file>`
-
-The ACL entry types are the POSIX ACLs representations of owner, group,
-and other.
-
-Permissions must be a combination of the characters `r` (read), `w`
-(write), and `x` (execute). You must specify the ACL entry in the
-following format and can specify multiple entry types separated by
-commas.
-
- ACL Entry | Description
- --- | ---
- u:uid:\<permission\> | Sets the access ACLs for a user. You can specify user name or UID
- g:gid:\<permission\> | Sets the access ACLs for a group. You can specify group name or GID.
- m:\<permission\> | Sets the effective rights mask. The mask is the combination of all access permissions of the owning group and all of the user and group entries.
- o:\<permission\> | Sets the access ACLs for users other than the ones in the group for the file.
-
-If a file or directory already has a POSIX ACL, and the setfacl
-command is used, the additional permissions are added to the existing
-POSIX ACL or the existing rule is modified.
-
-For example, to give read and write permissions to user antony:
-
-`# setfacl -m u:antony:rw /mnt/gluster/data/testfile `
-
-##Setting Default ACLs
-
-You can apply default ACLs only to directories. They determine the
-permissions that a file system object inherits from its parent
-directory when it is created.
-
-To set default ACLs
-
-You can set default ACLs for a directory using the following
-command:
-
-`# setfacl -d -m <entry_type>:<permissions> <directory>`
-
-Permissions must be a combination of the characters r (read), w (write), and x (execute). Specify the ACL entry_type as described below, separating multiple entry types with commas.
-
-u:*user_name:permissions*
- Sets the access ACLs for a user. Specify the user name, or the UID.
-
-g:*group_name:permissions*
- Sets the access ACLs for a group. Specify the group name, or the GID.
-
-m:*permission*
- Sets the effective rights mask. The mask is the combination of all access permissions of the owning group, and all user and group entries.
-
-o:*permissions*
- Sets the access ACLs for users other than the ones in the group for the file.
-
-For example, to set the default ACLs for the /data directory to read for
-users not in the user group:
-
-`# setfacl -d -m o::r /mnt/gluster/data `
-
-> **Note**
->
-> An access ACLs set for an individual file can override the default
-> ACLs permissions.
-
-**Effects of a Default ACLs**
-
-The following are the ways in which the permissions of a directory's
-default ACLs are passed to the files and subdirectories in it:
-
-- A subdirectory inherits the default ACLs of the parent directory
- both as its default ACLs and as an access ACLs.
-- A file inherits the default ACLs as its access ACLs.
-
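-For example, here is a short illustration of the inheritance described
-above (a hedged sketch, reusing the user and paths from the earlier
-examples):
-
-    # setfacl -m d:u:antony:rw /mnt/gluster/data
-    # touch /mnt/gluster/data/newfile
-    # getfacl /mnt/gluster/data/newfile
-
-The getfacl output for the new file will include a `user:antony:rw-`
-entry inherited from the directory's default ACLs.
-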
-##Retrieving POSIX ACLs
-
-You can view the existing POSIX ACLs for a file or directory.
-
-**To view existing POSIX ACLs**
-
-- View the existing access ACLs of a file using the following command:
-
-    `# getfacl <file>`
-
- For example, to view the existing POSIX ACLs for sample.jpg
-
- # getfacl /mnt/gluster/data/test/sample.jpg
- # owner: antony
- # group: antony
- user::rw-
- group::rw-
- other::r--
-
-- View the default ACLs of a directory using the following command:
-
-    `# getfacl <directory>`
-
- For example, to view the existing ACLs for /data/doc
-
- # getfacl /mnt/gluster/data/doc
- # owner: antony
- # group: antony
- user::rw-
- user:john:r--
- group::r--
- mask::r--
- other::r--
- default:user::rwx
- default:user:antony:rwx
- default:group::r-x
- default:mask::rwx
- default:other::r-x
-
-##Removing POSIX ACLs
-
-To remove all the permissions for a user, group, or others, use the
-following command:
-
-`# setfacl -x <entry_type> <file>`
-
-####setfacl entry_type Options
-
-The ACL entry_type translates to the POSIX ACL representations of owner, group, and other.
-
-Permissions must be a combination of the characters r (read), w (write), and x (execute). Specify the ACL entry_type as described below, separating multiple entry types with commas.
-
-u:*user_name*
- Sets the access ACLs for a user. Specify the user name, or the UID.
-
-g:*group_name*
- Sets the access ACLs for a group. Specify the group name, or the GID.
-
-m:*permission*
- Sets the effective rights mask. The mask is the combination of all access permissions of the owning group, and all user and group entries.
-
-o:*permissions*
- Sets the access ACLs for users other than the ones in the group for the file.
-
-For example, to remove all permissions from the user antony:
-
-`# setfacl -x u:antony /mnt/gluster/data/test-file`
-
-##Samba and ACLs
-
-If you are using Samba to access a GlusterFS FUSE mount, then POSIX ACLs
-are enabled by default. Samba has been compiled with the
-`--with-acl-support` option, so no special flags are required when
-accessing or mounting a Samba share.
-
-##NFS and ACLs
-
-Currently GlusterFS supports POSIX ACL configuration through NFS mounts,
-i.e. the setfacl and getfacl commands work over an NFS mount.
diff --git a/doc/admin-guide/en-US/markdown/admin_Hadoop.md b/doc/admin-guide/en-US/markdown/admin_Hadoop.md
deleted file mode 100644
index 1f5e8d4ae49..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_Hadoop.md
+++ /dev/null
@@ -1,31 +0,0 @@
-#Managing Hadoop Compatible Storage
-
-GlusterFS provides compatibility for Apache Hadoop and it uses the
-standard file system APIs available in Hadoop to provide a new storage
-option for Hadoop deployments. Existing MapReduce based applications can
-use GlusterFS seamlessly. This new functionality opens up data within
-Hadoop deployments to any file-based or object-based application.
-
-##Advantages
-
-The following are the advantages of Hadoop Compatible Storage with
-GlusterFS:
-
-- Provides simultaneous file-based and object-based access within
- Hadoop.
-- Eliminates the centralized metadata server.
-- Provides compatibility with MapReduce applications without requiring
-    a rewrite.
-- Provides a fault tolerant file system.
-
-###Pre-requisites
-
-The following are the pre-requisites to install Hadoop Compatible
-Storage :
-
-- Java Runtime Environment
-- getfattr - command line utility
-
-##Installing and Configuring Hadoop Compatible Storage
-
-See the detailed instruction set at https://forge.gluster.org/hadoop/pages/ConfiguringHadoop2
diff --git a/doc/admin-guide/en-US/markdown/admin_console.md b/doc/admin-guide/en-US/markdown/admin_console.md
deleted file mode 100644
index 126b7e2064f..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_console.md
+++ /dev/null
@@ -1,50 +0,0 @@
-##Using the Gluster Console Manager – Command Line Utility
-
-The Gluster Console Manager is a single command line utility that
-simplifies configuration and management of your storage environment. The
-Gluster Console Manager is similar to the LVM (Logical Volume Manager)
-CLI or ZFS Command Line Interface, but across multiple storage servers.
-You can use the Gluster Console Manager online, while volumes are
-mounted and active. Gluster automatically synchronizes volume
-configuration information across all Gluster servers.
-
-Using the Gluster Console Manager, you can create new volumes, start
-volumes, and stop volumes, as required. You can also add bricks to
-volumes, remove bricks from existing volumes, as well as change
-translator settings, among other operations.
-
-You can also use the commands to create scripts for automation, as well
-as use the commands as an API to allow integration with third-party
-applications.
-
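-For example, a minimal script (a sketch; the volume name is
-illustrative) that could be run from cron to check whether a volume is
-started:
-
-    #!/bin/bash
-    # Exit non-zero if test-volume is not in the "Started" state
-    if gluster volume info test-volume | grep -q '^Status: Started'; then
-        echo "test-volume is started"
-    else
-        echo "test-volume is NOT started" >&2
-        exit 1
-    fi
-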
-###Running the Gluster Console Manager
-
-You can run the Gluster Console Manager on any GlusterFS server either
-by invoking the commands or by running the Gluster CLI in interactive
-mode. You can also use the gluster command remotely using SSH.
-
-- To run commands directly:
-
-    ` # gluster peer <command> `
-
- For example:
-
- ` # gluster peer status `
-
-- To run the Gluster Console Manager in interactive mode
-
- `# gluster`
-
- You can execute gluster commands from the Console Manager prompt:
-
- ` gluster> `
-
-    For example, to view the status of the peer servers:
-
-        # gluster
-        gluster> peer status
-
-    This displays the status of the peers.
-
-
diff --git a/doc/admin-guide/en-US/markdown/admin_directory_Quota.md b/doc/admin-guide/en-US/markdown/admin_directory_Quota.md
deleted file mode 100644
index 402ac5e4fcc..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_directory_Quota.md
+++ /dev/null
@@ -1,219 +0,0 @@
-#Managing Directory Quota
-
-Directory quotas in GlusterFS allow you to set limits on the usage of disk
-space by directories or volumes. Storage administrators can control
-the disk space utilization at the directory and/or volume levels in
-GlusterFS by setting limits on allocatable disk space at any level in
-the volume and directory hierarchy. This is particularly useful in cloud
-deployments to facilitate a utility billing model.
-
-> **Note**
-> For now, only Hard limits are supported. Here, the limit cannot be
-> exceeded, and attempts to use more disk space or inodes beyond the set
-> limit are denied.
-
-System administrators can also monitor the resource utilization to limit
-the storage for the users depending on their role in the organization.
-
-You can set the quota at the following levels:
-
-- **Directory level** – limits the usage at the directory level
-- **Volume level** – limits the usage at the volume level
-
-> **Note**
-> You can set the disk limit on the directory even if it is not created.
-> The disk limit is enforced immediately after creating that directory.
-
-##Enabling Quota
-
-You must enable Quota to set disk limits.
-
-**To enable quota:**
-
-- Use the following command to enable quota:
-
-    # gluster volume quota <VOLNAME> enable
-
- For example, to enable quota on the test-volume:
-
- # gluster volume quota test-volume enable
- Quota is enabled on /test-volume
-
-##Disabling Quota
-
-You can disable Quota, if needed.
-
-**To disable quota:**
-
-- Use the following command to disable quota:
-
-    # gluster volume quota <VOLNAME> disable
-
- For example, to disable quota translator on the test-volume:
-
- # gluster volume quota test-volume disable
- Quota translator is disabled on /test-volume
-
-##Setting or Replacing Disk Limit
-
-You can create new directories in your storage environment and set a
-disk limit, or set a disk limit for existing directories. The directory
-name should be relative to the volume with the export directory/mount
-being treated as "/".
-
-**To set or replace disk limit:**
-
-- Set the disk limit using the following command:
-
-    # gluster volume quota <VOLNAME> limit-usage /<directory> <limit>
-
- For example, to set limit on data directory on the test-volume where
- data is a directory under the export directory:
-
- # gluster volume quota test-volume limit-usage /data 10GB
- Usage limit has been set on /data
-
- > **Note**
- > In a multi-level directory hierarchy, the strictest disk limit
- > will be considered for enforcement.
-
-##Displaying Disk Limit Information
-
-You can display disk limit information on all the directories on which
-the limit is set.
-
-**To display disk limit information:**
-
-- Display disk limit information of all the directories on which limit
- is set, using the following command:
-
-    # gluster volume quota <VOLNAME> list
-
- For example, to see the set disks limit on the test-volume:
-
- # gluster volume quota test-volume list
- /Test/data 10 GB 6 GB
- /Test/data1 10 GB 4 GB
-
-- Display disk limit information on a particular directory on which
- limit is set, using the following command:
-
-    # gluster volume quota <VOLNAME> list <directory>
-
- For example, to view the set limit on /data directory of test-volume:
-
- # gluster volume quota test-volume list /data
- /Test/data 10 GB 6 GB
-
-###Displaying Quota Limit Information Using the df Utility
-
-You can create a report of the disk usage using the df utility by taking quota limits into consideration. To generate a report, run the following command:
-
- # gluster volume set VOLNAME quota-deem-statfs on
-
-In this case, the total disk space of the directory is taken as the quota hard limit set on the directory of the volume.
-
->**Note**
->The default value for quota-deem-statfs is off. However, it is recommended to set quota-deem-statfs to on.
-
-The following example displays the disk usage when quota-deem-statfs is off:
-
- # gluster volume set test-volume features.quota-deem-statfs off
- volume set: success
- # gluster volume quota test-volume list
- Path Hard-limit Soft-limit Used Available
- -----------------------------------------------------------
- / 300.0GB 90% 11.5GB 288.5GB
- /John/Downloads 77.0GB 75% 11.5GB 65.5GB
-
-Disk usage for volume test-volume as seen on client1:
-
- # df -hT /home
- Filesystem Type Size Used Avail Use% Mounted on
- server1:/test-volume fuse.glusterfs 400G 12G 389G 3% /home
-
-The following example displays the disk usage when quota-deem-statfs is on:
-
- # gluster volume set test-volume features.quota-deem-statfs on
- volume set: success
- # gluster vol quota test-volume list
- Path Hard-limit Soft-limit Used Available
- -----------------------------------------------------------
- / 300.0GB 90% 11.5GB 288.5GB
- /John/Downloads 77.0GB 75% 11.5GB 65.5GB
-
-Disk usage for volume test-volume as seen on client1:
-
- # df -hT /home
- Filesystem Type Size Used Avail Use% Mounted on
- server1:/test-volume fuse.glusterfs 300G 12G 289G 4% /home
-
-When the quota-deem-statfs option is set to on, the user sees the hard limit set on the directory as the total disk space available on it.
-
-##Updating Memory Cache Size
-
-### Setting Timeout
-
-For performance reasons, quota caches the directory sizes on the client. You
-can set a timeout indicating the maximum valid duration of directory sizes
-in the cache, from the time they are populated.
-
-For example: if there are multiple clients writing to a single
-directory, there is a chance that some other client writes until the
-quota limit is exceeded. However, this new file size may not be
-reflected on this client until the size entry in the cache has become
-stale because of the timeout. If writes happen on this client during
-this period, they are allowed even though they would exceed the
-quota limits, since the size in the cache is not in sync with the actual
-size. When the timeout happens, the size in the cache is updated from the
-servers, it will be in sync again, and no further writes will be allowed.
-A timeout of zero forces fetching of directory sizes from the server for
-every operation that modifies file data and effectively disables
-directory size caching on the client side.
-
-**To update the memory cache size:**
-
-- Use the following command to update the memory cache size:
-
-    # gluster volume set <VOLNAME> features.quota-timeout <seconds>
-
-    For example, to set the cache timeout to 5 seconds on
-    test-volume:
-
- # gluster volume set test-volume features.quota-timeout 5
- Set volume successful
-
-##Setting Alert Time
-
-Alert time is the frequency at which you want your usage information to be logged after you reach the soft limit.
-
-**To set the alert time:**
-
-- Use the following command to set the alert time:
-
- # gluster volume quota VOLNAME alert-time time
-
- >**Note**
- >
- >The default alert-time is one week.
-
- For example, to set the alert time to one day:
-
- # gluster volume quota test-volume alert-time 1d
- volume quota : success
-
-##Removing Disk Limit
-
-You can remove the disk limit set on a directory if you do not want quota on it anymore.
-
-**To remove disk limit:**
-
-- Use the following command to remove the disk limit set on a particular directory:
-
-    # gluster volume quota <VOLNAME> remove <directory>
-
- For example, to remove the disk limit on /data directory of
- test-volume:
-
- # gluster volume quota test-volume remove /data
- Usage limit set on /data is removed
diff --git a/doc/admin-guide/en-US/markdown/admin_distributed_geo_rep.md b/doc/admin-guide/en-US/markdown/admin_distributed_geo_rep.md
deleted file mode 100644
index 38c1f6725b8..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_distributed_geo_rep.md
+++ /dev/null
@@ -1,264 +0,0 @@
-# Distributed Geo-Replication in glusterfs-3.5
-
-This is an admin how-to guide for the new distributed geo-replication being released as part of glusterfs-3.5.
-
-##### Note:
-This article is targeted towards users/admins who want to try new geo-replication, without going much deeper into internals and technology used.
-
-### How is it different from earlier geo-replication?
-
- Up until now, in geo-replication, only one of the nodes in the master volume would participate in geo-replication. This meant that all the data syncing was handled by a single node while the other nodes in the cluster sat idle (did not participate in data syncing). With distributed geo-replication, each node of the master volume takes responsibility for syncing the data present on that node. In a replicate configuration, one node of the replica pair 'Active'ly syncs the data while the other node is 'Passive'. The 'Passive' node only becomes 'Active' when the 'Active' node goes down. This way the new geo-rep leverages all the nodes in the volume and removes the bottleneck of syncing from one single node.
- The change detection mechanism is the other thing that has been improved with the new geo-rep. So far geo-rep used to crawl through the glusterfs file system to figure out the files that need to be synced, and because crawling a filesystem can be an expensive operation, this used to be a major performance bottleneck. With distributed geo-rep, all the files that need to be synced are identified through the changelog xlator. The changelog xlator journals all the fops that modify files, and these journals are then consumed by geo-rep to efficiently identify the files that need to be synced.
- A new syncing method, tar+ssh, has been introduced to improve the performance of a few specific data sets. You can switch between the rsync and tar+ssh syncing methods via the CLI to suit your data set. tar+ssh is better suited for data sets with a large number of small files.
-
-
-### Using Distributed geo-replication:
-
-#### Prerequisites:
- There should be password-less ssh set up between at least one node in the master volume and one node in the slave volume. The geo-rep create command should be executed from this node, which has the password-less ssh set up to the slave.
-
- Unlike previous versions, the slave **must** be a gluster volume. The slave cannot be a directory. Both the master and slave volumes should have been created and started before creating the geo-rep session.
-
-#### Creating secret pem pub file
- Execute the below command from the node where you set up the password-less ssh to the slave. This creates the secret pem pub file, which contains the RSA keys of all the nodes in the master volume. When the geo-rep create command is executed, glusterd uses this file to establish the geo-rep specific ssh connections.
-```sh
-gluster system:: execute gsec_create
-```
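-
-The generated file can be inspected to verify that it contains the
-public keys collected from the master nodes (a sketch; the path assumes
-glusterd's default working directory):
-```sh
-cat /var/lib/glusterd/geo-replication/common_secret.pem.pub
-```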
-
-#### Creating geo-replication session.
-Create a geo-rep session between the master and slave volume using the following command. The node on which this command is executed and the <slave_host> specified in the command should have password-less ssh set up between them. The push-pem option actually uses the secret pem pub file created earlier and establishes geo-rep specific password-less ssh between each node in the master and each node of the slave.
-```sh
-gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> create push-pem [force]
-```
-
-If the total available size in the slave volume is less than the total size of the master, the command will throw an error message. In such cases the 'force' option can be used.
-
-In use cases where the rsa-keys of the nodes in the master volume are distributed to the slave nodes through an external agent, and slave side verifications like the following:
-- whether ssh port 22 is open on the slave
-- whether proper passwordless ssh login is set up
-- whether the slave volume is created and is empty
-- whether the slave has enough memory
-
-are taken care of by the external agent, the following command can be used to create the geo-replication session:
-```sh
-gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> create no-verify [force]
-```
-In this case the master node rsa-key distribution to the slave nodes does not happen, and the above mentioned slave verification is not performed; these two things have to be taken care of externally.
-
-### Creating Non Root Geo-replication session
-
-`mountbroker` is a new service of glusterd. This service allows an
-unprivileged process to own a GlusterFS mount by registering a label
-(and DSL (Domain-specific language) options ) with glusterd through a
-glusterd volfile. Using CLI, you can send a mount request to glusterd to
-receive an alias (symlink) of the mounted volume.
-
-On a request from the agent, the unprivileged slave agents use the
-mountbroker service of glusterd to set up an auxiliary gluster mount for
-the agent in a special environment, which ensures that the agent is only
-allowed access with special parameters that provide administrative
-level access to the particular volume.
-
-**To setup an auxiliary gluster mount for the agent**:
-
-1. In all Slave nodes, create a new group. For example, `geogroup`.
-
-2. In all Slave nodes, create an unprivileged account. For example, `geoaccount`. Make it a member of `geogroup`.
-
-3. In all Slave nodes, create a new directory owned by root and with permissions *0711*. For example, create the mountbroker-root directory `/var/mountbroker-root`. (Example commands for steps 1-3 are sketched after this list.)
-
-4. In any one of the Slave nodes, run the following commands to add options to the glusterd vol file (`/etc/glusterfs/glusterd.vol`
-    in rpm installations and `/usr/local/etc/glusterfs/glusterd.vol` in source installations).
-
- gluster system:: execute mountbroker opt mountbroker-root /var/mountbroker-root
- gluster system:: execute mountbroker opt geo-replication-log-group geogroup
- gluster system:: execute mountbroker opt rpc-auth-allow-insecure on
-
-5. In any one of the Slave nodes, add the Mountbroker user to the glusterd vol file using,
-
- ```sh
- gluster system:: execute mountbroker user geoaccount slavevol
- ```
-
-where `slavevol` is the Slave Volume name.
-
-If you host multiple slave volumes on the Slave, repeat this for each of them, adding the following options to the volfile using
-
- ```sh
- gluster system:: execute mountbroker user geoaccount2 slavevol2
- gluster system:: execute mountbroker user geoaccount3 slavevol3
- ```
-
-To add multiple volumes per mountbroker user,
-
- ```sh
- gluster system:: execute mountbroker user geoaccount1 slavevol11,slavevol12,slavevol13
- gluster system:: execute mountbroker user geoaccount2 slavevol21,slavevol22
- gluster system:: execute mountbroker user geoaccount3 slavevol31
- ```
-
-6. Restart `glusterd` service on all the Slave nodes
-
-7. Set up password-less SSH from one of the master nodes to the user on one of the slave nodes. For example, to geoaccount.
-
-8. Create a geo-replication relationship between master and slave to the user by running the following command on the master node:
- For example,
-
- ```sh
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> create push-pem [force]
- ```
-
-9. On the slave node which is used to create the relationship, run `/usr/libexec/glusterfs/set_geo_rep_pem_keys.sh` as root with the user name, master volume name, and slave volume name as the arguments.
-
- ```sh
- /usr/libexec/glusterfs/set_geo_rep_pem_keys.sh <mountbroker_user> <master_volume> <slave_volume>
- ```
-
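-The group, user, and mountbroker-root directory from steps 1-3 can be
-created with standard commands, for example (a sketch, using the names
-from the steps above):
-
-```sh
-# Run on every Slave node
-groupadd geogroup
-useradd -m -G geogroup geoaccount
-mkdir /var/mountbroker-root
-chmod 0711 /var/mountbroker-root
-```
-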
-### Create and mount meta volume
-NOTE:
-___
-If the shared meta volume is already created and mounted at '/var/run/gluster/shared_storage'
-as part of nfs or snapshot, please jump to the section 'Configure meta volume with geo-replication'.
-___
-
-A 3-way replicated common gluster meta-volume should be configured and is shared
-by nfs, snapshot and geo-replication. The name of the meta-volume should be
-'gluster_shared_storage' and should be mounted at '/var/run/gluster/shared_storage/'.
-
-The meta volume needs to be configured with geo-replication to better handle
-rename and other consistency issues in geo-replication during brick/node down
-scenarios when the master volume is configured with EC (Erasure Coding)/AFR.
-The following are the steps to configure the meta volume.
-
-Create a 3 way replicated meta volume in the master cluster with all three bricks from different nodes as follows.
-
- ```sh
- gluster volume create gluster_shared_storage replica 3 <host1>:<brick_path> <host2>:<brick_path> <host3>:<brick_path>
- ```
-
-Start the meta volume as follows.
-
- ```sh
- gluster volume start <meta_vol>
- ```
-
-Mount the meta volume as follows in all the master nodes.
- ```sh
- mount -t glusterfs <master_host>:gluster_shared_storage /var/run/gluster/shared_storage
- ```
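-
-To make this mount persistent across reboots, an fstab entry can be
-added on each master node (a sketch; adjust the host name to your
-environment):
- ```sh
- <master_host>:/gluster_shared_storage  /var/run/gluster/shared_storage  glusterfs  defaults,_netdev  0 0
- ```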
-
-###Configure meta volume with geo-replication session as follows.
-
- ```sh
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> config use_meta_volume true
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> config use_meta_volume true
- ```
-
-#### Starting a geo-rep session
-There is no change in this command from previous versions to this version.
-
- ```sh
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> start
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> start
- ```
-
-This command actually starts the session, meaning the gsyncd monitor process will be started, which in turn spawns gsyncd worker processes whenever required. This also turns on the changelog xlator (if not in the ON state already), which starts recording all the changes on each of the glusterfs bricks. If the master is empty during geo-rep start, the change detection mechanism will be changelog; else it will be xsync (the changes are identified by crawling through the filesystem). Later, when the initial data has been synced to the slave, the change detection mechanism will be set to changelog.
-
-#### Status of geo-replication
-
-gluster now has variants of status command.
-
- ```sh
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> status
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> status
- ```
-
-This displays the status of session from each brick of the master to each brick of the slave node.
-
-If you want more detailed status, then run 'status detail'
-
- ```sh
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> status detail
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> status detail
- ```
-
-This command displays extra information like total files synced, files that need to be synced, deletes pending, etc.
-
-#### Stopping geo-replication session
-
-This command stops all geo-rep related processes, i.e. the gsyncd monitor and worker processes. Note that the changelog will **not** be turned off with this command.
-
- ```sh
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> stop [force]
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> stop [force]
- ```
-
-The force option is to be used when one of the nodes (or glusterd on one of the nodes) is down. Once stopped, the session can be restarted any time. Note that upon restarting the session, the change detection mechanism falls back to xsync mode. This happens even though you have the changelog generating journals while the geo-rep session is stopped.
-
-#### Deleting geo-replication session
-
-Now you can delete the glusterfs geo-rep session. This will delete all the config data associated with the geo-rep session.
-
- ```sh
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> delete
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> delete
- ```
-
-This deletes all the gsync conf files on each of the nodes. It returns a failure if any of the nodes is down. And unlike geo-rep stop, there is no 'force' option with this.
-
-#### Changing the config values
-
-There are some configuration values which can be changed using the CLI. You can see all the current config values with the following command.
-
- ```sh
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> config
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> config
- ```
-
-You can also check just one of them, like log-file or change-detector:
-
- ```sh
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> config log-file
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> config log-file
-
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> config change-detector
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> config change-detector
-
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> config working-dir
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> config working-dir
- ```
-
-To set a new value, just provide it after the config key. Note that not all the config values are allowed to be changed; some cannot be modified.
-
- ```sh
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> config change-detector xsync
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> config change-detector xsync
- ```
-
-Make sure you provide a proper value for the config key. If you have a data set with a large number of small files, then you can use tar+ssh as the syncing method. Note that, if the geo-rep session is running, this restarts gsyncd.
-
- ```sh
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> config use-tarssh true
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> config use-tarssh true
- ```
-
-Resetting these values to their defaults is also simple.
-
- ```sh
- gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> config \!use-tarssh
- # If Mountbroker Setup,
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> config \!use-tarssh
- ```
-
-That makes the config key (use-tarssh in this case) fall back to its default value.
diff --git a/doc/admin-guide/en-US/markdown/admin_geo-replication.md b/doc/admin-guide/en-US/markdown/admin_geo-replication.md
deleted file mode 100644
index 6b1f5c6df93..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_geo-replication.md
+++ /dev/null
@@ -1,681 +0,0 @@
-#Managing Geo-replication
-
-Geo-replication provides a continuous, asynchronous, and incremental
-replication service from one site to another over Local Area Networks
-(LANs), Wide Area Networks (WANs), and across the Internet.
-
-Geo-replication uses a master–slave model, whereby replication and
-mirroring occurs between the following partners:
-
-- **Master** – a GlusterFS volume
-
-- **Slave** – a slave which can be of the following types:
-
- - A local directory which can be represented as file URL like
- `file:///path/to/dir`. You can use shortened form, for example,
- ` /path/to/dir`.
-
- - A GlusterFS Volume - Slave volume can be either a local volume
- like `gluster://localhost:volname` (shortened form - `:volname`)
- or a volume served by different host like
- `gluster://host:volname` (shortened form - `host:volname`).
-
- > **Note**
- >
- > Both of the above types can be accessed remotely using SSH tunnel.
- > To use SSH, add an SSH prefix to either a file URL or gluster type
- > URL. For example, ` ssh://root@remote-host:/path/to/dir`
- > (shortened form - `root@remote-host:/path/to/dir`) or
- > `ssh://root@remote-host:gluster://localhost:volname` (shortened
- > from - `root@remote-host::volname`).
-
-This section introduces Geo-replication, illustrates the various
-deployment scenarios, and explains how to configure the system to
-provide replication and mirroring in your environment.
-
-##Replicated Volumes vs Geo-replication
-
-The following table lists the difference between replicated volumes and
-geo-replication:
-
- Replicated Volumes | Geo-replication
- --- | ---
- Mirrors data across clusters | Mirrors data across geographically distributed clusters
- Provides high-availability | Ensures backing up of data for disaster recovery
- Synchronous replication (each and every file operation is sent across all the bricks) | Asynchronous replication (checks for the changes in files periodically and syncs them on detecting differences)
-
-##Preparing to Deploy Geo-replication
-
-This section provides an overview of the Geo-replication deployment
-scenarios, describes how you can check the minimum system requirements,
-and explores common deployment scenarios.
-
-##Exploring Geo-replication Deployment Scenarios
-
-Geo-replication provides an incremental replication service over Local
-Area Networks (LANs), Wide Area Networks (WANs), and across the Internet.
-This section illustrates the most common deployment scenarios for
-Geo-replication, including the following:
-
-- Geo-replication over LAN
-- Geo-replication over WAN
-- Geo-replication over the Internet
-- Multi-site cascading Geo-replication
-
-**Geo-replication over LAN**
-
-You can configure Geo-replication to mirror data over a Local Area
-Network.
-
-![ Geo-replication over LAN ][]
-
-**Geo-replication over WAN**
-
-You can configure Geo-replication to replicate data over a Wide Area
-Network.
-
-![ Geo-replication over WAN ][]
-
-**Geo-replication over Internet**
-
-You can configure Geo-replication to mirror data over the Internet.
-
-![ Geo-replication over Internet ][]
-
-**Multi-site cascading Geo-replication**
-
-You can configure Geo-replication to mirror data in a cascading fashion
-across multiple sites.
-
-![ Multi-site cascading Geo-replication ][]
-
-##Geo-replication Deployment Overview
-
-Deploying Geo-replication involves the following steps:
-
-1. Verify that your environment matches the minimum system requirement.
-2. Determine the appropriate deployment scenario.
-3. Start Geo-replication on master and slave systems, as required.
-
-##Checking Geo-replication Minimum Requirements
-
-Before deploying GlusterFS Geo-replication, verify that your systems
-match the minimum requirements.
-
-The following table outlines the minimum requirements for both master
-and slave nodes within your environment:
-
- Component | Master | Slave
- --- | --- | ---
- Operating System | GNU/Linux | GNU/Linux
- Filesystem | GlusterFS 3.2 or higher | GlusterFS 3.2 or higher (GlusterFS needs to be installed, but does not need to be running), ext3, ext4, or XFS (any other POSIX compliant file system would work, but has not been tested extensively)
- Python | Python 2.4 (with ctypes external module), or Python 2.5 (or higher) | Python 2.4 (with ctypes external module), or Python 2.5 (or higher)
- Secure shell | OpenSSH version 4.0 (or higher) | SSH2-compliant daemon
- Remote synchronization | rsync 3.0.7 or higher | rsync 3.0.7 or higher
- FUSE | GlusterFS supported versions | GlusterFS supported versions
-
-##Setting Up the Environment for Geo-replication
-
-**Time Synchronization**
-
-- On the bricks of a geo-replication master volume, all the servers' time
-    must be uniform. It is recommended to set up an NTP (Network Time
-    Protocol) service to keep the bricks in sync and avoid
-    out-of-time sync effects.
-
-    For example: in a Replicated volume where brick1 of the master is at
-    12.20 hrs and brick2 of the master is at 12.10 hrs with a 10 minute
-    time lag, all the changes on brick2 during this period may go
-    unnoticed during synchronization of files with the Slave.
-
-**To setup Geo-replication for SSH**
-
-Password-less login has to be set up between the host machine (where
-geo-replication Start command will be issued) and the remote machine
-(where slave process should be launched through SSH).
-
-1. On the node where geo-replication sessions are to be set up, run the
- following command:
-
- # ssh-keygen -f /var/lib/glusterd/geo-replication/secret.pem
-
- Press Enter twice to avoid passphrase.
-
-2. Run the following command on master for all the slave hosts:
-
-    # ssh-copy-id -i /var/lib/glusterd/geo-replication/secret.pem.pub <user>@<slave_host>
-
-##Setting Up the Environment for a Secure Geo-replication Slave
-
-You can configure a secure slave using SSH so that the master is granted
-restricted access. With GlusterFS, you need not specify configuration
-parameters regarding the slave in the master-side configuration. For
-example, the master does not require the location of the rsync program
-on the slave, but the slave must ensure that rsync is in the PATH of the
-user to which the master connects using SSH. The only information that the
-master and slave have to negotiate is the slave-side user account, the
-slave's resources that the master uses as slave resources, and the
-master's public key. Secure access to the slave can be established using
-the following options:
-
-- Restricting Remote Command Execution
-
-- Using `Mountbroker` for Slaves
-
-- Using IP based Access Control
-
-**Backward Compatibility**
-
-Your existing Geo-replication environment will work with GlusterFS,
-except for the following:
-
-- The process of secure reconfiguration affects only the glusterfs
- instance on slave. The changes are transparent to master with the
- exception that you may have to change the SSH target to an
- unprivileged account on slave.
-
-- The following are some exceptions where this might not work:
-
- - Geo-replication URLs which specify the slave resource when
- configuring master will include the following special
- characters: space, \*, ?, [;
-
- - Slave must have a running instance of glusterd, even if there is
- no gluster volume among the mounted slave resources (that is,
- file tree slaves are used exclusively).
-
-### Restricting Remote Command Execution
-
-If you restrict remote command execution, then the Slave audits commands
-coming from the master, and only the commands related to the given
-geo-replication session are allowed. The Slave also provides access only
-to the files within the slave resource which can be read or manipulated
-by the Master.
-
-To restrict remote command execution:
-
-1. Identify the location of the gsyncd helper utility on Slave. This
- utility is installed in `PREFIX/libexec/glusterfs/gsyncd`, where
- PREFIX is a compile-time parameter of glusterfs. For example,
-    `--prefix=PREFIX` to the configure script with the following common
-    values: `/usr`, `/usr/local`, and `/opt/glusterfs/glusterfs_version`.
-
-2. Ensure that commands invoked from the master on the slave are passed through the
-    slave's gsyncd utility.
-
- You can use either of the following two options:
-
- - Set gsyncd with an absolute path as the shell for the account
- which the master connects through SSH. If you need to use a
- privileged account, then set it up by creating a new user with
- UID 0.
-
- - Setup key authentication with command enforcement to gsyncd. You
- must prefix the copy of master's public key in the Slave
- account's `authorized_keys` file with the following command:
-
- `command=<path to gsyncd>`.
-
- For example,
- `command="PREFIX/glusterfs/gsyncd" ssh-rsa AAAAB3Nza....`
-
-### Using Mountbroker for Slaves
-
-`mountbroker` is a new service of glusterd. This service allows an
-unprivileged process to own a GlusterFS mount by registering a label
-(and DSL (Domain-specific language) options ) with glusterd through a
-glusterd volfile. Using CLI, you can send a mount request to glusterd to
-receive an alias (symlink) of the mounted volume.
-
-Upon a request from the agent, the unprivileged slave agents use the
-mountbroker service of glusterd to set up an auxiliary gluster mount for
-the agent in a special environment, which ensures that the agent is only
-allowed to access the volume with special parameters that provide
-administrative-level access to that particular volume.
-
-**To setup an auxiliary gluster mount for the agent**:
-
-1. In all Slave nodes, create a new group. For example, `geogroup`.
-
-2. In all Slave nodes, create an unprivileged account. For example, `geoaccount`. Make it a
-   member of `geogroup`.
-
-3. In all Slave nodes, create a new directory owned by root with permissions *0711*.
-   For example, create the mountbroker-root directory
-   `/var/mountbroker-root`.
-
-4. In any one of the Slave nodes, run the following commands to add options to the glusterd vol
-   file (`/etc/glusterfs/glusterd.vol` in RPM installations and
-   `/usr/local/etc/glusterfs/glusterd.vol` in source installations):
-
- ```sh
- gluster system:: execute mountbroker opt mountbroker-root /var/mountbroker-root
- gluster system:: execute mountbroker opt geo-replication-log-group geogroup
- gluster system:: execute mountbroker opt rpc-auth-allow-insecure on
- ```
-
-5. In any one of the Slave nodes, add the mountbroker user to the glusterd vol file using:
-
- ```sh
- gluster system:: execute mountbroker user geoaccount slavevol
- ```
-
-    where slavevol is the Slave volume name.
-
-    If you host multiple slave volumes on the Slave, repeat the command for each of them,
-    adding the corresponding user and volume to the volfile:
-
- ```sh
- gluster system:: execute mountbroker user geoaccount2 slavevol2
- gluster system:: execute mountbroker user geoaccount3 slavevol3
- ```
-
- To add multiple volumes per mountbroker user,
-
- ```sh
- gluster system:: execute mountbroker user geoaccount1 slavevol11,slavevol12,slavevol13
- gluster system:: execute mountbroker user geoaccount2 slavevol21,slavevol22
- gluster system:: execute mountbroker user geoaccount3 slavevol31
- ```
-6. Restart `glusterd` service on all Slave nodes.
-
-7. Set up password-less SSH from one of the master nodes to the user on one of the slave nodes.
-For example, to geoaccount.
-
-8. Create a geo-replication relationship between the master and the slave mountbroker user by
-running the following command on the master node:
-
- ```sh
- gluster volume geo-replication <master_volume> <mountbroker_user>@<slave_host>::<slave_volume> create push-pem [force]
- ```
-
-9. On the slave node which was used to create the relationship, run `/usr/libexec/glusterfs/set_geo_rep_pem_keys.sh`
-as root with the user name, master volume name, and slave volume name as the arguments.
-
- ```sh
- /usr/libexec/glusterfs/set_geo_rep_pem_keys.sh <mountbroker_user> <master_volume> <slave_volume>
- ```
-
-### Using IP based Access Control
-
-You can use the IP-based access control method to restrict access to
-slave resources based on IP address. The method works for both gluster
-slaves and file tree slaves, but this section focuses on file tree
-slaves.
-
-To set access control based on IP address for file tree slaves:
-
-1. Set a general restriction for accessibility of file tree resources:
-
- # gluster volume geo-replication '/*' config allow-network ::1,127.0.0.1
-
- This will refuse all requests for spawning slave agents except for
- requests initiated locally.
-
-2. If you want to lease the file tree at `/data/slave-tree` to a Master,
-    enter the following command:
-
-    # gluster volume geo-replication /data/slave-tree config allow-network MasterIP
-
- `MasterIP` is the IP address of Master. The slave agent spawn
- request from master will be accepted if it is executed at
- `/data/slave-tree`.
-
-If the Master side network configuration does not enable the Slave to
-recognize the exact IP address of Master, you can use CIDR notation to
-specify a subnet instead of a single IP address as MasterIP or even
-comma-separated lists of CIDR subnets.
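-
-For example, a sketch that allows a whole subnet plus one additional host
-for the file tree leased above (the addresses are placeholders):
-
-```sh
-# allow any master in 192.168.1.0/24, plus a single host outside that subnet
-gluster volume geo-replication /data/slave-tree config allow-network 192.168.1.0/24,10.2.3.4
-```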
-
-If you want to extend IP based access control to gluster slaves, use the
-following command:
-
- # gluster volume geo-replication '*' config allow-network ::1,127.0.0.1
-
-##Starting Geo-replication
-
-This section describes how to configure and start Gluster
-Geo-replication in your storage environment, and verify that it is
-functioning correctly.
-
-###Starting Geo-replication
-
-To start Gluster Geo-replication
-
-- Use the following command to start geo-replication between the hosts:
-
-    # gluster volume geo-replication MASTER SLAVE start
-
- For example:
-
- # gluster volume geo-replication Volume1 example.com:/data/remote_dir start
- Starting geo-replication session between Volume1
- example.com:/data/remote_dir has been successful
-
- > **Note**
- >
- > You may need to configure the service before starting Gluster
- > Geo-replication.
-
-###Verifying Successful Deployment
-
-You can use the gluster command to verify the status of Gluster
-Geo-replication in your environment.
-
-**To verify the status Gluster Geo-replication**
-
-- Verify the status by issuing the following command on host:
-
-    # gluster volume geo-replication MASTER SLAVE status
-
- For example:
-
- # gluster volume geo-replication Volume1 example.com:/data/remote_dir status
- MASTER SLAVE STATUS
- ______ ______________________________ ____________
- Volume1 root@example.com:/data/remote_dir Starting....
-
-###Displaying Geo-replication Status Information
-
-You can display status information about a specific geo-replication
-master session, or a particular master-slave session, or all
-geo-replication sessions, as needed.
-
-**To display geo-replication status information**
-
-- Use the following command to display information of all geo-replication sessions:
-
-    # gluster volume geo-replication status
-
-- Use the following command to display information of a particular master slave session:
-
-    # gluster volume geo-replication MASTER SLAVE status
-
- For example, to display information of Volume1 and
- example.com:/data/remote\_dir
-
- # gluster volume geo-replication Volume1 example.com:/data/remote_dir status
-
- The status of the geo-replication between Volume1 and
- example.com:/data/remote\_dir is displayed.
-
-- Display information of all geo-replication sessions belonging to a
- master
-
- # gluster volume geo-replication MASTER status
-
-    For example, to display information of all sessions of Volume1:
-
-    # gluster volume geo-replication Volume1 status
-
- The status of a session could be one of the following:
-
-- **Initializing**: This is the initial phase of the Geo-replication session;
- it remains in this state for a minute in order to make sure no abnormalities are present.
-
-- **Created**: The geo-replication session is created, but not started.
-
-- **Active**: The gsync daemon in this node is active and syncing the data.
-
-- **Passive**: A replica pair of the active node. The data synchronization is handled by active node.
- Hence, this node does not sync any data.
-
-- **Faulty**: The geo-replication session has experienced a problem, and the issue needs to be
- investigated further.
-
-- **Stopped**: The geo-replication session has stopped, but has not been deleted.
-
- The Crawl Status can be one of the following:
-
-- **Changelog Crawl**: The changelog translator has produced the changelog and that is being consumed
- by gsyncd daemon to sync data.
-
-- **Hybrid Crawl**: The gsyncd daemon is crawling the glusterFS file system and generating pseudo
- changelog to sync data.
-
-- **Checkpoint Status**: Displays the status of the checkpoint, if set. Otherwise, it displays as N/A.
-
-##Configuring Geo-replication
-
-To configure Gluster Geo-replication
-
-- Use the following command at the Gluster command line:
-
-    # gluster volume geo-replication MASTER SLAVE config [options]
-
- For example:
-
-    Use the following command to view the list of all option/value pairs:
-
- # gluster volume geo-replication Volume1 example.com:/data/remote_dir config
-
-####Configurable Options
-
-The following table provides an overview of the configurable options for a geo-replication setting:
-
- Option | Description
- --- | ---
- gluster-log-file LOGFILE | The path to the geo-replication glusterfs log file.
- gluster-log-level LOGFILELEVEL| The log level for glusterfs processes.
- log-file LOGFILE | The path to the geo-replication log file.
- log-level LOGFILELEVEL | The log level for geo-replication.
- ssh-command COMMAND | The SSH command to connect to the remote machine (the default is SSH).
- rsync-command COMMAND | The rsync command to use for synchronizing the files (the default is rsync).
- use-tarssh true | The use-tarssh command allows tar over Secure Shell protocol. Use this option to handle workloads of files that have not undergone edits.
- volume_id=UID | The command to delete the existing master UID for the intermediate/slave node.
- timeout SECONDS | The timeout period in seconds.
- sync-jobs N | The number of simultaneous files/directories that can be synchronized.
- ignore-deletes | If this option is set to 1, a file deleted on the master will not trigger a delete operation on the slave. As a result, the slave will remain as a superset of the master and can be used to recover the master in the event of a crash and/or accidental delete.
- checkpoint [LABEL&#124;now] | Sets a checkpoint with the given option LABEL. If the option is set as now, then the current time will be used as the label.
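-
-For example, a sketch of changing a single option for the session used in
-the earlier examples (the values shown are illustrative only):
-
-```sh
-# raise the geo-replication log level while debugging
-gluster volume geo-replication Volume1 example.com:/data/remote_dir config log-level DEBUG
-```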
-
-##Stopping Geo-replication
-
-You can use the gluster command to stop Gluster Geo-replication (syncing
-of data from Master to Slave) in your environment.
-
-**To stop Gluster Geo-replication**
-
-- Use the following command to stop geo-replication between the hosts:
-
-    # gluster volume geo-replication MASTER SLAVE stop
-
- For example:
-
- # gluster volume geo-replication Volume1 example.com:/data/remote_dir stop
- Stopping geo-replication session between Volume1 and
- example.com:/data/remote_dir has been successful
-
-##Restoring Data from the Slave
-
-You can restore data from the slave to the master volume, whenever the
-master volume becomes faulty for reasons like hardware failure.
-
-The example in this section assumes that you are using the Master Volume
-(Volume1) with the following configuration:
-
- machine1# gluster volume info
- Type: Distribute
- Status: Started
- Number of Bricks: 2
- Transport-type: tcp
- Bricks:
- Brick1: machine1:/export/dir16
- Brick2: machine2:/export/dir16
- Options Reconfigured:
- geo-replication.indexing: on
-
-The data is syncing from master volume (Volume1) to slave directory
-(example.com:/data/remote\_dir). To view the status of this
-geo-replication session run the following command on Master:
-
- # gluster volume geo-replication Volume1 root@example.com:/data/remote_dir status
-
-**Before Failure**
-
-Assume that the Master volume had 100 files and was mounted at
-/mnt/gluster on one of the client machines (client). Run the following
-command on Client machine to view the list of files:
-
-    client# ls /mnt/gluster | wc -l
- 100
-
-The slave directory (example.com) will have same data as in the master
-volume and same can be viewed by running the following command on slave:
-
-    example.com# ls /data/remote_dir/ | wc -l
- 100
-
-**After Failure**
-
-If one of the bricks (machine2) fails, then the status of
-Geo-replication session is changed from "OK" to "Faulty". To view the
-status of this geo-replication session run the following command on
-Master:
-
- # gluster volume geo-replication Volume1 root@example.com:/data/remote_dir status
-
-Machine2 has failed, and now you can see a discrepancy in the number of
-files between master and slave. A few files will be missing from the
-master volume but will still be available on the slave, as shown below.
-
-Run the following command on Client:
-
-    client # ls /mnt/gluster | wc -l
- 52
-
-Run the following command on slave (example.com):
-
-    example.com# ls /data/remote_dir/ | wc -l
- 100
-
-**To restore data from the slave machine**
-
-1. Use the following command to stop all Master's geo-replication sessions:
-
-    # gluster volume geo-replication MASTER SLAVE stop
-
- For example:
-
- machine1# gluster volume geo-replication Volume1
- example.com:/data/remote_dir stop
-
- Stopping geo-replication session between Volume1 &
- example.com:/data/remote_dir has been successful
-
- > **Note**
- >
-    > Repeat the `gluster volume geo-replication stop` command on all
-    > active geo-replication sessions of the master volume.
-
-2. Use the following command to replace the faulty brick in the master:
-
-    # gluster volume replace-brick VOLNAME BRICK NEW-BRICK start
-
- For example:
-
- machine1# gluster volume replace-brick Volume1 machine2:/export/dir16 machine3:/export/dir16 start
- Replace-brick started successfully
-
-3. Use the following command to commit the migration of data:
-
-    # gluster volume replace-brick VOLNAME BRICK NEW-BRICK commit force
-
- For example:
-
- machine1# gluster volume replace-brick Volume1 machine2:/export/dir16 machine3:/export/dir16 commit force
- Replace-brick commit successful
-
-4. Use the following command to verify the migration of brick by viewing the volume info:
-
- # gluster volume info
-
- For example:
-
- machine1# gluster volume info
- Volume Name: Volume1
- Type: Distribute
- Status: Started
- Number of Bricks: 2
- Transport-type: tcp
- Bricks:
- Brick1: machine1:/export/dir16
- Brick2: machine3:/export/dir16
- Options Reconfigured:
- geo-replication.indexing: on
-
-5. Run rsync command manually to sync data from slave to master
- volume's client (mount point).
-
- For example:
-
- example.com# rsync -PavhS --xattrs --ignore-existing /data/remote_dir/ client:/mnt/gluster
-
- Verify that the data is synced by using the following command:
-
- On master volume, run the following command:
-
-    client# ls | wc -l
- 100
-
- On the Slave run the following command:
-
-    example.com# ls /data/remote_dir/ | wc -l
- 100
-
-    Now the Master volume and the Slave directory are in sync.
-
-6. Use the following command to restart geo-replication session from master to slave:
-
-    # gluster volume geo-replication MASTER SLAVE start
-
- For example:
-
- machine1# gluster volume geo-replication Volume1
- example.com:/data/remote_dir start
- Starting geo-replication session between Volume1 &
- example.com:/data/remote_dir has been successful
-
-##Best Practices
-
-**Manually Setting Time**
-
-If you have to change the time on your bricks manually, then you must
-set uniform time on all bricks. Setting time backward corrupts the
-geo-replication index, so the recommended way to set the time manually is:
-
-1. Stop geo-replication between the master and slave using the
- following command:
-
-    # gluster volume geo-replication MASTER SLAVE stop
-
-2. Stop the geo-replication indexing using the following command:
-
-    # gluster volume set VOLNAME geo-replication.indexing off
-
-3. Set uniform time on all bricks.
-
-4. Use the following command to restart your geo-replication session:
-
-    # gluster volume geo-replication MASTER SLAVE start
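-
-A concrete sketch of the four steps above, reusing the session from the
-earlier examples (substitute your own volume and slave):
-
-```sh
-gluster volume geo-replication Volume1 example.com:/data/remote_dir stop
-gluster volume set Volume1 geo-replication.indexing off
-# ...set a uniform time on all bricks, e.g. via NTP...
-gluster volume geo-replication Volume1 example.com:/data/remote_dir start
-```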
-
-**Running Geo-replication commands in one system**
-
-It is advisable to run the geo-replication commands from one of the bricks
-in the trusted storage pool. This is because the log files for the
-geo-replication session are stored on the *server* where the
-geo-replication start command is initiated, which makes it easier to locate
-the log files when required.
-
-**Isolation**
-
-Geo-replication slave operation is not sandboxed as of now and is run as
-a privileged service. For security reasons, it is therefore advised that the
-administrator create a sandbox environment (a dedicated machine, a dedicated
-virtual machine, or a chroot/container type solution) in which to run the
-geo-replication slave. Enhancements in this regard will be available in a
-follow-up minor release.
-
- [ Geo-replication over LAN ]: ../images/Geo-Rep_LAN.png
- [ Geo-replication over WAN ]: ../images/Geo-Rep_WAN.png
- [ Geo-replication over Internet ]: ../images/Geo-Rep03_Internet.png
- [ Multi-site cascading Geo-replication ]: ../images/Geo-Rep04_Cascading.png
diff --git a/doc/admin-guide/en-US/markdown/admin_logging.md b/doc/admin-guide/en-US/markdown/admin_logging.md
deleted file mode 100644
index f15907bbe61..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_logging.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# GlusterFS service Logs and locations
-
-The following lists the component, service, and functionality based logs in the GlusterFS server. As per the Filesystem Hierarchy Standard (FHS), all the log files are placed in the `/var/log` directory.
-
-
-##glusterd:
-
-glusterd logs are located at `/var/log/glusterfs/etc-glusterfs-glusterd.vol.log`. One glusterd log file per server. This log file also contains the snapshot and user logs.
-
-##gluster cli command:
-Gluster CLI logs are located at `/var/log/glusterfs/cmd_history.log`. Gluster commands executed on a node in a GlusterFS Trusted Storage Pool are logged in this file.
-
-##bricks:
-Brick logs are located at `/var/log/glusterfs/bricks/<path extraction of brick path>.log`. One log file per brick on the server.
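-
-For example, for a brick at `/data/brick1/gv0` the log file name is typically derived by replacing the slashes in the brick path with dashes (a sketch; the exact mapping may vary by version):
-
-```sh
-# assumes a brick at /data/brick1/gv0
-tail -f /var/log/glusterfs/bricks/data-brick1-gv0.log
-```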
-
-##rebalance:
-rebalance logs are located at `/var/log/glusterfs/VOLNAME-rebalance.log` . One log file per volume on the server.
-
-##self-heal daemon:
-Self-heal daemon logs are located at `/var/log/glusterfs/glustershd.log`. One log file per server.
-
-##quota:
-
-`/var/log/glusterfs/quotad.log`: logs of the quota daemons running on each node.
-`/var/log/glusterfs/quota-crawl.log`: whenever quota is enabled, a file system crawl is performed and the corresponding log is stored in this file.
-`/var/log/glusterfs/quota-mount-VOLNAME.log`: an auxiliary FUSE client is mounted in <gluster-run-dir>/VOLNAME of the glusterFS and the corresponding client logs are found in this file.
-
-One log file per server (and one per volume for quota-mount).
-
-##Gluster NFS:
-
-`/var/log/glusterfs/nfs.log`. One log file per server.
-
-##SAMBA Gluster:
-
-`/var/log/samba/glusterfs-VOLNAME-<ClientIP>.log` . If the client mounts this on a glusterFS server node, the actual log file or the mount point may not be found. In such a case, the mount outputs of all the glusterFS type mount operations need to be considered.
-
-##Ganesha NFS :
-`/var/log/nfs-ganesha.log`
-
-##FUSE Mount:
-`/var/log/glusterfs/<mountpoint path extraction>.log `
-
-##Geo-replication:
-
-`/var/log/glusterfs/geo-replication/<master>`
-`/var/log/glusterfs/geo-replication-slaves `
-
-##gluster volume heal VOLNAME info command:
-`/var/log/glusterfs/glfsheal-VOLNAME.log` . One log file per server on which the command is executed.
-
-##gluster-swift:
-`/var/log/messages`
-
-##SwiftKrbAuth:
-`/var/log/httpd/error_log `
diff --git a/doc/admin-guide/en-US/markdown/admin_managing_snapshots.md b/doc/admin-guide/en-US/markdown/admin_managing_snapshots.md
deleted file mode 100644
index 672a8ceb4c6..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_managing_snapshots.md
+++ /dev/null
@@ -1,316 +0,0 @@
-Managing GlusterFS Volume Snapshots
-==========================
-
-This section describes how to perform common GlusterFS volume snapshot
-management operations.
-
-Pre-requisites
-=====================
-
-The GlusterFS volume snapshot feature is based on thinly provisioned LVM snapshots.
-To make use of the snapshot feature, a GlusterFS volume should fulfill the following
-pre-requisites:
-
-* Each brick should be on an independent thinly provisioned LVM.
-* The brick LVM should not contain any data other than the brick itself.
-* None of the bricks should be on a thick-provisioned LVM.
-* The gluster version should be 3.6 or above.
-
-Details of how to create a thin volume can be found at the following link:
-https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Logical_Volume_Manager_Administration/thinly_provisioned_volume_creation.html
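-
-A minimal sketch of provisioning one such thin LV for a brick (the device
-name, sizes, volume group name, and mount point below are placeholders;
-see the link above for details):
-
-```sh
-pvcreate /dev/sdb
-vgcreate vg_bricks /dev/sdb
-# create a thin pool, then a thin LV carved out of it
-lvcreate -L 100G --thinpool brick_pool vg_bricks
-lvcreate -V 50G --thin -n brick1_lv vg_bricks/brick_pool
-mkfs.xfs /dev/vg_bricks/brick1_lv
-mkdir -p /bricks/brick1
-mount /dev/vg_bricks/brick1_lv /bricks/brick1
-```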
-
-
-A few features of snapshots are:
-=============================
-
-**Crash Consistency**
-
-When a snapshot is taken at a particular point in time, it is ensured that
-the snapshot is crash consistent. When the snapshot is restored,
-the data is identical to what it was at the time the snapshot was taken.
-
-
-**Online Snapshot**
-
-When the snapshot is being taken the file system and its associated data
-continue to be available for the clients.
-
-
-**Quorum Based**
-
-The quorum feature ensures that the volume is in good condition while bricks
-are down. Quorum is not met if any brick is down in an n-way replication where
-n <= 2. Quorum is met when m bricks are up, where m >= (n/2 + 1) if n is odd,
-and m >= n/2 with the first brick up if n is even. Snapshot creation fails
-if quorum is not met.
-
-
-**Barrier**
-
-During snapshot creation some of the fops are blocked to guarantee crash
-consistency. There is a default time-out of 2 minutes; if snapshot creation
-is not complete within that span then the fops are unbarriered. If the unbarrier
-happens before the snapshot creation is complete then the snapshot creation
-operation fails. This is to ensure that the snapshot is in a consistent state.
-
-
-
-Snapshot Management
-=====================
-
-
-**Snapshot creation**
-
-Syntax :
-*gluster snapshot create <snapname\> <volname\> \[no-timestamp] \[description <description\>\] \[force\]*
-
-Details :
-Creates a snapshot of a GlusterFS volume. The user can provide a snap name and a
-description to identify the snap. The description cannot be more than 1024
-characters.
-The snapshot will be created by appending a timestamp to the user-provided snap
-name. The user can override this behaviour by giving the no-timestamp flag.
-
-NOTE : To be able to take a snapshot, the volume should be present and it
-should be in the started state.
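-
-For example (a sketch; the snapshot and volume names are illustrative):
-
-```sh
-# snapshot of volume vol1, keeping the name exactly as given
-gluster snapshot create snap1 vol1 no-timestamp description "before upgrade"
-```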
-
------------------------------------------------------------------------------
-
-**Snapshot clone**
-
-Syntax :
-*gluster snapshot clone <clonename\> <snapname\>*
-
-Details :
-Creates a clone of a snapshot. Upon successful completion, a new GlusterFS
-volume will be created from the snapshot. The clone will be a space-efficient clone,
-i.e., the snapshot and the clone will share the backend disk.
-
-NOTE : To be able to take a clone from snapshot, snapshot should be present
-and it should be in activated state.
-
------------------------------------------------------------------------------
-
-**Restoring snaps**
-
-Syntax :
-*gluster snapshot restore <snapname\>*
-
-Details :
-Restores an already taken snapshot of a GlusterFS volume.
-Snapshot restore is an offline activity therefore if the volume is
-online (in started state) then the restore operation will fail.
-
-Once the snapshot is restored it will not be available in the
-list of snapshots.
-
----------------------------------------------------------------------------
-
-**Deleting snaps**
-
-Syntax :
-*gluster snapshot delete \(all | <snapname\> | volume <volname\>\)*
-
-Details :
-If snapname is specified then the mentioned snapshot is deleted.
-If volname is specified then all snapshots belonging to that particular
-volume are deleted. If the keyword *all* is used then all snapshots belonging
-to the system are deleted.
-
---------------------------------------------------------------------------
-
-**Listing of available snaps**
-
-Syntax:
-*gluster snapshot list \[volname\]*
-
-Details:
-Lists all snapshots taken.
-If volname is provided, then only the snapshots belonging to
-that particular volume are listed.
-
--------------------------------------------------------------------------
-
-**Information of available snaps**
-
-Syntax:
-*gluster snapshot info \[\(snapname | volume <volname\>\)\]*
-
-Details:
-This command gives information such as snapshot name, snapshot UUID,
-time at which snapshot was created, and it lists down the snap-volume-name,
-number of snapshots already taken and number of snapshots still available
-for that particular volume, and the state of the snapshot.
-
-------------------------------------------------------------------------
-
-**Status of snapshots**
-
-Syntax:
-*gluster snapshot status \[\(snapname | volume <volname\>\)\]*
-
-Details:
-This command gives the status of the snapshot.
-The details included are the snapshot brick path, volume group (LVM details),
-status of the snapshot bricks, PID of the bricks, data percentage filled for
-the particular volume group to which the snapshots belong, and total size
-of the logical volume.
-
-If snapname is specified then the status of the mentioned snapshot is displayed.
-If volname is specified then the status of all snapshots belonging to that volume
-is displayed. If neither snapname nor volname is specified then the status of all
-the snapshots present in the system is displayed.
-
-------------------------------------------------------------------------
-
-**Configuring the snapshot behavior**
-
-Syntax:
-*snapshot config \[volname\] \(\[snap-max-hard-limit <count\>\] \[snap-max-soft-limit <percent>\]\)
- | \(\[auto-delete <enable|disable\>\]\)
- | \(\[activate-on-create <enable|disable\>\]\)*
-
-Details:
-Displays and sets the snapshot config values.
-
-snapshot config without any keywords displays the snapshot config values of
-all volumes in the system. If volname is provided, then the snapshot config
-values of that volume are displayed.
-
-The snapshot config command along with keywords can be used to change the existing
-config values. If volname is provided then the config value of that volume is
-changed, else it will set/change the system limit.
-
-snap-max-soft-limit and auto-delete are global options, that will be
-inherited by all volumes in the system and cannot be set to individual volumes.
-
-The system limit takes precedence over the volume specific limit.
-
-When auto-delete feature is enabled, then upon reaching the soft-limit,
-with every successful snapshot creation, the oldest snapshot will be deleted.
-
-When auto-delete feature is disabled, then upon reaching the soft-limit,
-the user gets a warning with every successful snapshot creation.
-
-Upon reaching the hard-limit, further snapshot creations will not be allowed.
-
-activate-on-create is disabled by default. If you enable activate-on-create,
-then further snapshots will be activated at the time of snapshot creation.
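-
-For example, a sketch with illustrative values:
-
-```sh
-# show the config values for one volume, then raise its hard limit
-gluster snapshot config vol1
-gluster snapshot config vol1 snap-max-hard-limit 100
-```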
--------------------------------------------------------------------------
-
-**Activating a snapshot**
-
-Syntax:
-*gluster snapshot activate <snapname\>*
-
-Details:
-Activates the mentioned snapshot.
-
-Note : By default the snapshot is activated during snapshot creation.
-
--------------------------------------------------------------------------
-
-**Deactivate a snapshot**
-
-Syntax:
-*gluster snapshot deactivate <snapname\>*
-
-Details:
-Deactivates the mentioned snapshot.
-
--------------------------------------------------------------------------
-
-**Accessing the snapshot**
-
-Snapshots can be accessed in the following ways.
-
-1) Mounting the snapshot:
-
-The snapshot can be accessed via a FUSE mount (only FUSE). To do that it has to be
-mounted first. A snapshot can be mounted via FUSE with the below command:
-
-*mount -t glusterfs <hostname>:/snaps/<snap-name>/<volume-name> <mount-path>*
-
-For example, say "host1" is one of the peers, "vol" is the volume name, and "my-snap"
-is the snapshot name. In this case the snapshot can be mounted via this command:
-
-*mount -t glusterfs host1:/snaps/my-snap/vol /mnt/snapshot*
-
-
-2) User serviceability:
-
-Apart from the above method of mounting the snapshot, a list of available
-snapshots and the contents of each snapshot can be viewed from any of the mount
-points accessing the glusterfs volume (either FUSE or NFS or SMB). For having
-user serviceable snapshots, it has to be enabled for a volume first. User
-serviceability can be enabled for a volume using the below command.
-
-*gluster volume set <volname> features.uss enable*
-
-Once enabled, from any directory (including the root of the filesystem) an
-access point into the snapshot world will be created. The access point is a hidden
-directory; cd-ing into it takes the user into the snapshot world. By
-default the hidden directory is ".snaps". Once user serviceability is enabled,
-one will be able to cd into .snaps from any directory. Doing "ls" in that
-directory shows a list of directories which are nothing but the snapshots
-present for that volume. Say there are 3 snapshots ("snap1", "snap2",
-"snap3"); then doing ls in the .snaps directory will show those 3 names as the
-directory entries. They represent the state of the directory from which .snaps
-was entered, at different points in time.
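-
-A short sketch of what this looks like from a client mount (the volume
-name and paths are illustrative):
-
-```sh
-gluster volume set vol1 features.uss enable
-cd /mnt/vol1/mydir/.snaps   # hidden entry point, not shown by a plain ls
-ls                          # lists the snapshots, e.g. snap1 snap2 snap3
-```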
-
-NOTE: Access to the snapshots is read-only.
-
-Also, the name of the hidden directory (or the access point to the snapshot
-world) can be changed using the below command.
-
-*gluster volume set <volname> snapshot-directory <new-name>*
-
-3) Accessing from Windows:
-The glusterfs volumes can be made accessible to Windows via Samba. (The
-glusterfs plugin for Samba helps achieve this, without having to re-export
-a FUSE-mounted glusterfs volume.) The snapshots of a glusterfs volume can
-also be viewed in the Windows explorer.
-
-There are 2 ways:
-* Give the path of the entry point directory
-(\\<hostname>\<samba-share>\<directory>\<entry-point path>) in the run command
-window
-* Go to the samba share via windows explorer. Make hidden files and folders
-visible so that in the root of the samba share a folder icon for the entry point
-can be seen.
-
-NOTE: From the explorer, snapshot world can be entered via entry point only from
-the root of the samba share. If snapshots have to be seen from subfolders, then
-the path should be provided in the run command window.
-
-For snapshots to be accessible from windows, below 2 options can be used.
-A) The glusterfs plugin for samba should give the option "snapdir-entry-path"
-while starting. The option is an indication to glusterfs, that samba is loading
-it and the value of the option should be the path that is being used as the
-share for windows.
-Ex: Say, there is a glusterfs volume and a directory called "export" from the
-root of the volume is being used as the samba share, then samba has to load
-glusterfs with this option as well.
-
- ret = glfs_set_xlator_option(fs, "*-snapview-client",
- "snapdir-entry-path", "/export");
-The xlator option "snapdir-entry-path" is not exposed via volume set options and
-cannot be changed from the CLI. It is an option that has to be provided at the time of
-mounting glusterfs or when samba loads glusterfs.
-B) The accessibility of snapshots via root of the samba share from windows
-is configurable. By default it is turned off. It is a volume set option which can
-be changed via CLI.
-
-gluster volume set <volname> features.show-snapshot-directory "on/off". By
-default it is off.
-
-Only when both the above options have been provided (i.e. snapdir-entry-path
-contains a valid unix path that is exported and the show-snapshot-directory option
-is set to true) can snapshots be accessed via the Windows explorer.
-
-If only the 1st option (i.e. snapdir-entry-path) is set via samba and the 2nd option
-(i.e. show-snapshot-directory) is off, then snapshots can be accessed from
-Windows via the run command window, but not via the explorer.
-
-
---------------------------------------------------------------------------------------
diff --git a/doc/admin-guide/en-US/markdown/admin_managing_volumes.md b/doc/admin-guide/en-US/markdown/admin_managing_volumes.md
deleted file mode 100644
index f45567a1141..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_managing_volumes.md
+++ /dev/null
@@ -1,770 +0,0 @@
-#Managing GlusterFS Volumes
-
-This section describes how to perform common GlusterFS management
-operations, including the following:
-
-- [Tuning Volume Options](#tuning-options)
-- [Configuring Transport Types for a Volume](#configuring-transport-types-for-a-volume)
-- [Expanding Volumes](#expanding-volumes)
-- [Shrinking Volumes](#shrinking-volumes)
-- [Migrating Volumes](#migrating-volumes)
-- [Rebalancing Volumes](#rebalancing-volumes)
-- [Stopping Volumes](#stopping-volumes)
-- [Deleting Volumes](#deleting-volumes)
-- [Triggering Self-Heal on Replicate](#triggering-self-heal-on-replicate)
-- [Non Uniform File Allocation(NUFA)](#non-uniform-file-allocation)
-
-<a name="tuning-options" />
-##Tuning Volume Options
-
-You can tune volume options, as needed, while the cluster is online and
-available.
-
-> **Note**
->
-> It is recommended that you set the server.allow-insecure option to ON if
-> there are too many bricks in each volume or if there are too many
-> services which have already utilized all the privileged ports in the
-> system. Turning this option ON allows ports to accept/reject messages
-> from insecure ports. So, use this option only if your deployment
-> requires it.
-
-Tune volume options using the following command:
-
-    # gluster volume set <VOLNAME> <OPTION> <VALUE>
-
-For example, to specify the performance cache size for test-volume:
-
- # gluster volume set test-volume performance.cache-size 256MB
- Set volume successful
-
-The following table lists the volume options along with their
-description and default value:
-
-> **Note**
->
-> The default options given here are subject to modification at any
-> given time and may not be the same for all versions.
-
-Option | Description | Default Value | Available Options
---- | --- | --- | ---
-auth.allow | IP addresses of the clients which should be allowed to access the volume. | \* (allow all) | Valid IP address which includes wild card patterns including \*, such as 192.168.1.\*
-auth.reject | IP addresses of the clients which should be denied to access the volume. | NONE (reject none) | Valid IP address which includes wild card patterns including \*, such as 192.168.2.\*
-client.grace-timeout | Specifies the duration for the lock state to be maintained on the client after a network disconnection. | 10 | 10 - 1800 secs
-cluster.self-heal-window-size | Specifies the maximum number of blocks per file on which self-heal would happen simultaneously. | 16 | 0 - 1025 blocks
-cluster.data-self-heal-algorithm | Specifies the type of self-heal. If you set the option as "full", the entire file is copied from source to destinations. If the option is set to "diff" the file blocks that are not in sync are copied to destinations. Reset uses a heuristic model. If the file does not exist on one of the subvolumes, or a zero-byte file exists (created by entry self-heal) the entire content has to be copied anyway, so there is no benefit from using the "diff" algorithm. If the file size is about the same as page size, the entire file can be read and written with a few operations, which will be faster than "diff" which has to read checksums and then read and write. | reset | full/diff/reset
-cluster.min-free-disk | Specifies the percentage of disk space that must be kept free. Might be useful for non-uniform bricks | 10% | Percentage of required minimum free disk space
-cluster.stripe-block-size | Specifies the size of the stripe unit that will be read from or written to. | 128 KB (for all files) | size in bytes
-cluster.self-heal-daemon | Allows you to turn-off proactive self-heal on replicated | On | On/Off
-cluster.ensure-durability | This option makes sure the data/metadata is durable across abrupt shutdown of the brick. | On | On/Off
-diagnostics.brick-log-level | Changes the log-level of the bricks. | INFO | DEBUG/WARNING/ERROR/CRITICAL/NONE/TRACE
-diagnostics.client-log-level | Changes the log-level of the clients. | INFO | DEBUG/WARNING/ERROR/CRITICAL/NONE/TRACE
-diagnostics.latency-measurement | Statistics related to the latency of each operation would be tracked. | Off | On/Off
-diagnostics.dump-fd-stats | Statistics related to file-operations would be tracked. | Off | On
-features.read-only | Enables you to mount the entire volume as read-only for all the clients (including NFS clients) accessing it. | Off | On/Off
-features.lock-heal | Enables self-healing of locks when the network disconnects. | On | On/Off
-features.quota-timeout | For performance reasons, quota caches the directory sizes on client. You can set timeout indicating the maximum duration of directory sizes in cache, from the time they are populated, during which they are considered valid | 0 | 0 - 3600 secs
-geo-replication.indexing | Use this option to automatically sync the changes in the filesystem from Master to Slave. | Off | On/Off
-network.frame-timeout | The time frame after which the operation has to be declared as dead, if the server does not respond for a particular operation. | 1800 (30 mins) | 1800 secs
-network.ping-timeout | The time duration for which the client waits to check if the server is responsive. When a ping timeout happens, there is a network disconnect between the client and server. All resources held by server on behalf of the client get cleaned up. When a reconnection happens, all resources will need to be re-acquired before the client can resume its operations on the server. Additionally, the locks will be acquired and the lock tables updated. This reconnect is a very expensive operation and should be avoided. | 42 Secs | 42 Secs
-nfs.enable-ino32 | For 32-bit nfs clients or applications that do not support 64-bit inode numbers or large files, use this option from the CLI to make Gluster NFS return 32-bit inode numbers instead of 64-bit inode numbers. | Off | On/Off
-nfs.volume-access | Set the access type for the specified sub-volume. | read-write | read-write/read-only
-nfs.trusted-write | If there is an UNSTABLE write from the client, STABLE flag will be returned to force the client to not send a COMMIT request. In some environments, combined with a replicated GlusterFS setup, this option can improve write performance. This flag allows users to trust Gluster replication logic to sync data to the disks and recover when required. COMMIT requests if received will be handled in a default manner by fsyncing. STABLE writes are still handled in a sync manner. | Off | On/Off
-nfs.trusted-sync | All writes and COMMIT requests are treated as async. This implies that no write requests are guaranteed to be on server disks when the write reply is received at the NFS client. Trusted sync includes trusted-write behavior. | Off | On/Off
-nfs.export-dir | This option can be used to export specified comma separated subdirectories in the volume. The path must be an absolute path. Along with path allowed list of IPs/hostname can be associated with each subdirectory. If provided connection will allowed only from these IPs. Format: \<dir\>[(hostspec[hostspec...])][,...]. Where hostspec can be an IP address, hostname or an IP range in CIDR notation. **Note**: Care must be taken while configuring this option as invalid entries and/or unreachable DNS servers can introduce unwanted delay in all the mount calls. | No sub directory exported. | Absolute path with allowed list of IP/hostname
-nfs.export-volumes | Enable/Disable exporting entire volumes, instead if used in conjunction with nfs3.export-dir, can allow setting up only subdirectories as exports. | On | On/Off
-nfs.rpc-auth-unix | Enable/Disable the AUTH\_UNIX authentication type. This option is enabled by default for better interoperability. However, you can disable it if required. | On | On/Off
-nfs.rpc-auth-null | Enable/Disable the AUTH\_NULL authentication type. It is not recommended to change the default value for this option. | On | On/Off
-nfs.rpc-auth-allow\<IP- Addresses\> | Allow a comma separated list of addresses and/or hostnames to connect to the server. By default, all clients are disallowed. This allows you to define a general rule for all exported volumes. | Reject All | IP address or Host name
-nfs.rpc-auth-reject\<IP- Addresses\> | Reject a comma separated list of addresses and/or hostnames from connecting to the server. By default, all connections are disallowed. This allows you to define a general rule for all exported volumes. | Reject All | IP address or Host name
-nfs.ports-insecure | Allow client connections from unprivileged ports. By default only privileged ports are allowed. This is a global setting in case insecure ports are to be enabled for all exports using a single option. | Off | On/Off
-nfs.addr-namelookup | Turn-off name lookup for incoming client connections using this option. In some setups, the name server can take too long to reply to DNS queries resulting in timeouts of mount requests. Use this option to turn off name lookups during address authentication. Note, turning this off will prevent you from using hostnames in rpc-auth.addr.\* filters. | On | On/Off
-nfs.register-with-portmap | For systems that need to run multiple NFS servers, you need to prevent more than one from registering with portmap service. Use this option to turn off portmap registration for Gluster NFS. | On | On/Off
-nfs.port \<PORT- NUMBER\> | Use this option on systems that need Gluster NFS to be associated with a non-default port number. | NA | 38465- 38467
-nfs.disable | Turn-off volume being exported by NFS | Off | On/Off
-performance.write-behind-window-size | Size of the per-file write-behind buffer. | 1MB | Write-behind cache size
-performance.io-thread-count | The number of threads in IO threads translator. | 16 | 0-65
-performance.flush-behind | If this option is set ON, instructs write-behind translator to perform flush in background, by returning success (or any errors, if any of previous writes were failed) to application even before flush is sent to backend filesystem. | On | On/Off
-performance.cache-max-file-size | Sets the maximum file size cached by the io-cache translator. Can use the normal size descriptors of KB, MB, GB,TB or PB (for example, 6GB). Maximum size uint64. | 2 \^ 64 -1 bytes | size in bytes
-performance.cache-min-file-size | Sets the minimum file size cached by the io-cache translator. Values same as "max" above | 0B | size in bytes
-performance.cache-refresh-timeout | The cached data for a file will be retained till 'cache-refresh-timeout' seconds, after which data re-validation is performed. | 1s | 0-61
-performance.cache-size | Size of the read cache. | 32 MB | size in bytes
-server.allow-insecure | Allow client connections from unprivileged ports. By default only privileged ports are allowed. This is a global setting in case insecure ports are to be enabled for all exports using a single option. | On | On/Off
-server.grace-timeout | Specifies the duration for the lock state to be maintained on the server after a network disconnection. | 10 | 10 - 1800 secs
-server.statedump-path | Location of the state dump file. | tmp directory of the brick | New directory path
-storage.health-check-interval | Number of seconds between health-checks done on the filesystem that is used for the brick(s). Defaults to 30 seconds, set to 0 to disable. | 30 seconds | 0 (disable) or a positive integer (seconds)
-
-You can view the changed volume options using command:
-
- # gluster volume info
-
-<a name="configuring-transport-types-for-a-volume" />
-##Configuring Transport Types for a Volume
-
-A volume can support one or more transport types for communication between clients and brick processes.
-There are three types of supported transport, which are tcp, rdma, and tcp,rdma.
-
-To change the supported transport types of a volume, follow the procedure:
-
-1. Unmount the volume on all the clients using the following command:
-
- # umount mount-point
-
-2. Stop the volumes using the following command:
-
- # gluster volume stop volname
-
-3. Change the transport type. For example, to enable both tcp and rdma execute the following command:
-
- # gluster volume set volname config.transport tcp,rdma OR tcp OR rdma
-
-4. Mount the volume on all the clients. For example, to mount using rdma transport, use the following command:
-
- # mount -t glusterfs -o transport=rdma server1:/test-volume /mnt/glusterfs
-
-<a name="expanding-volumes" />
-##Expanding Volumes
-
-You can expand volumes, as needed, while the cluster is online and
-available. For example, you might want to add a brick to a distributed
-volume, thereby increasing the distribution and adding to the capacity
-of the GlusterFS volume.
-
-Similarly, you might want to add a group of bricks to a distributed
-replicated volume, increasing the capacity of the GlusterFS volume.
-
-> **Note**
->
-> When expanding distributed replicated and distributed striped volumes,
-> you need to add a number of bricks that is a multiple of the replica
-> or stripe count. For example, to expand a distributed replicated
-> volume with a replica count of 2, you need to add bricks in multiples
-> of 2 (such as 4, 6, 8, etc.).
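-
-For example, a sketch of expanding a replica-2 volume by one more replica
-pair (the volume, server, and brick names are placeholders):
-
-```sh
-# two new bricks, a multiple of the replica count of 2
-gluster volume add-brick rep-volume server5:/exp5 server6:/exp6
-```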
-
-**To expand a volume**
-
-1. On the first server in the cluster, probe the server to which you
- want to add the new brick using the following command:
-
-    `# gluster peer probe <SERVERNAME>`
-
- For example:
-
- # gluster peer probe server4
- Probe successful
-
-2. Add the brick using the following command:
-
-    `# gluster volume add-brick <VOLNAME> <NEW-BRICK>`
-
- For example:
-
- # gluster volume add-brick test-volume server4:/exp4
- Add Brick successful
-
-3. Check the volume information using the following command:
-
- `# gluster volume info `
-
- The command displays information similar to the following:
-
- Volume Name: test-volume
- Type: Distribute
- Status: Started
- Number of Bricks: 4
- Bricks:
- Brick1: server1:/exp1
- Brick2: server2:/exp2
- Brick3: server3:/exp3
- Brick4: server4:/exp4
-
-4. Rebalance the volume to ensure that all files are distributed to the
- new brick.
-
- You can use the rebalance command as described in [Rebalancing Volumes](#rebalancing-volumes)
-
-<a name="shrinking-volumes" />
-##Shrinking Volumes
-
-You can shrink volumes, as needed, while the cluster is online and
-available. For example, you might need to remove a brick that has become
-inaccessible in a distributed volume due to hardware or network failure.
-
-> **Note**
->
-> Data residing on the brick that you are removing will no longer be
-> accessible at the Gluster mount point. Note however that only the
-> configuration information is removed - you can continue to access the
-> data directly from the brick, as necessary.
-
-When shrinking distributed replicated and distributed striped volumes,
-you need to remove a number of bricks that is a multiple of the replica
-or stripe count. For example, to shrink a distributed striped volume
-with a stripe count of 2, you need to remove bricks in multiples of 2
-(such as 4, 6, 8, etc.). In addition, the bricks you are trying to
-remove must be from the same sub-volume (the same replica or stripe
-set).
-
-**To shrink a volume**
-
-1. Remove the brick using the following command:
-
-    `# gluster volume remove-brick <VOLNAME> <BRICKNAME> start`
-
- For example, to remove server2:/exp2:
-
- # gluster volume remove-brick test-volume server2:/exp2 force
-
- Removing brick(s) can result in data loss. Do you want to Continue? (y/n)
-
-2. Enter "y" to confirm the operation. The command displays the
- following message indicating that the remove brick operation is
- successfully started:
-
- Remove Brick successful
-
-3. (Optional) View the status of the remove brick operation using the
- following command:
-
-    `# gluster volume remove-brick <VOLNAME> <BRICKNAME> status`
-
- For example, to view the status of remove brick operation on
- server2:/exp2 brick:
-
- # gluster volume remove-brick test-volume server2:/exp2 status
- Node Rebalanced-files size scanned status
- --------- ---------------- ---- ------- -----------
- 617c923e-6450-4065-8e33-865e28d9428f 34 340 162 in progress
-
-4. Check the volume information using the following command:
-
- `# gluster volume info `
-
- The command displays information similar to the following:
-
- # gluster volume info
- Volume Name: test-volume
- Type: Distribute
- Status: Started
- Number of Bricks: 3
- Bricks:
- Brick1: server1:/exp1
- Brick3: server3:/exp3
- Brick4: server4:/exp4
-
-5. Rebalance the volume to ensure that all files are distributed to the
- new brick.
-
- You can use the rebalance command as described in [Rebalancing Volumes](#rebalancing-volumes)
-
-<a name="migrating-volumes" />
-##Migrating Volumes
-
-You can migrate the data from one brick to another, as needed, while the
-cluster is online and available.
-
-**To migrate a volume**
-
-1. Make sure the new brick, server5 in this example, is successfully
- added to the cluster.
-
-2. Migrate the data from one brick to another using the following
- command:
-
-    `# gluster volume replace-brick <VOLNAME> <BRICK> <NEW-BRICK> start`
-
- For example, to migrate the data in server3:/exp3 to server5:/exp5
- in test-volume:
-
- # gluster volume replace-brick test-volume server3:/exp3 server5:exp5 start
- Replace brick start operation successful
-
- > **Note**
- >
- > You need to have the FUSE package installed on the server on which
- > you are running the replace-brick command for the command to work.
-
-3. To pause the migration operation, if needed, use the following
- command:
-
-    `# gluster volume replace-brick <VOLNAME> <BRICK> <NEW-BRICK> pause`
-
- For example, to pause the data migration from server3:/exp3 to
- server5:/exp5 in test-volume:
-
- # gluster volume replace-brick test-volume server3:/exp3 server5:/exp5 pause
- Replace brick pause operation successful
-
-4. To abort the migration operation, if needed, use the following
- command:
-
-    `# gluster volume replace-brick <VOLNAME> <BRICK> <NEW-BRICK> abort`
-
- For example, to abort the data migration from server3:/exp3 to
- server5:/exp5 in test-volume:
-
- # gluster volume replace-brick test-volume server3:/exp3 server5:exp5 abort
- Replace brick abort operation successful
-
-5. Check the status of the migration operation using the following
- command:
-
-    `# gluster volume replace-brick <VOLNAME> <BRICK> <NEW-BRICK> status`
-
- For example, to check the data migration status from server3:/exp3
- to server5:/exp5 in test-volume:
-
- # gluster volume replace-brick test-volume server3:/exp3 server5:/exp5 status
- Current File = /usr/src/linux-headers-2.6.31-14/block/Makefile
- Number of files migrated = 10567
- Migration complete
-
- The status command shows the current file being migrated along with
- the current total number of files migrated. After completion of
- migration, it displays Migration complete.
-
-6. Commit the migration of data from one brick to another using the
- following command:
-
-    `# gluster volume replace-brick <VOLNAME> <BRICK> <NEW-BRICK> commit`
-
- For example, to commit the data migration from server3:/exp3 to
- server5:/exp5 in test-volume:
-
- # gluster volume replace-brick test-volume server3:/exp3 server5:/exp5 commit
- replace-brick commit successful
-
-7. Verify the migration of brick by viewing the volume info using the
- following command:
-
- `# gluster volume info `
-
- For example, to check the volume information of new brick
- server5:/exp5 in test-volume:
-
- # gluster volume info test-volume
- Volume Name: testvolume
- Type: Replicate
- Status: Started
- Number of Bricks: 4
- Transport-type: tcp
- Bricks:
- Brick1: server1:/exp1
- Brick2: server2:/exp2
- Brick3: server4:/exp4
- Brick4: server5:/exp5
-
- The new volume details are displayed.
-
-    In the above example, there were previously bricks 1, 2, 3, and 4,
-    and now brick 3 has been replaced by brick 5.
-
-<a name="rebalancing-volumes" />
-##Rebalancing Volumes
-
-After expanding or shrinking a volume (using the add-brick and
-remove-brick commands respectively), you need to rebalance the data
-among the servers. New directories created after expanding or shrinking
-of the volume will be evenly distributed automatically. For all the
-existing directories, the distribution can be fixed by rebalancing the
-layout and/or data.
-
-This section describes how to rebalance GlusterFS volumes in your
-storage environment, using the following common scenarios:
-
-- **Fix Layout** - Fixes the layout changes so that the files can actually
- go to newly added nodes.
-
-- **Fix Layout and Migrate Data** - Rebalances volume by fixing the layout
- changes and migrating the existing data.
-
-###Rebalancing Volume to Fix Layout Changes
-
-Fixing the layout is necessary because the layout structure is static
-for a given directory. In a scenario where new bricks have been added to
-the existing volume, newly created files in existing directories will
-still be distributed only among the old bricks. The
-`# gluster volume rebalance <VOLNAME> fix-layout start` command will fix the
-layout information so that the files can also go to newly added nodes.
-When this command is issued, all the file stat information which is
-already cached will get revalidated.
-
-As of GlusterFS 3.6, the assignment of files to bricks will take into account
-the sizes of the bricks. For example, a 20TB brick will be assigned twice as
-many files as a 10TB brick. In versions before 3.6, the two bricks were
-treated as equal regardless of size, and would have been assigned an equal
-share of files.
-
-A fix-layout rebalance will only fix the layout changes and does not
-migrate data. If you want to migrate the existing data,
-use the `# gluster volume rebalance <VOLNAME> start` command to rebalance data among
-the servers.
-
-**To rebalance a volume to fix layout changes**
-
-- Start the rebalance operation on any one of the servers using the
-  following command:
-
-    `# gluster volume rebalance <VOLNAME> fix-layout start`
-
- For example:
-
- # gluster volume rebalance test-volume fix-layout start
- Starting rebalance on volume test-volume has been successful
-
-###Rebalancing Volume to Fix Layout and Migrate Data
-
-After expanding or shrinking a volume (using the add-brick and
-remove-brick commands respectively), you need to rebalance the data
-among the servers.
-
-**To rebalance a volume to fix layout and migrate the existing data**
-
-- Start the rebalance operation on any one of the servers using the
-  following command:
-
-    `# gluster volume rebalance <VOLNAME> start`
-
- For example:
-
- # gluster volume rebalance test-volume start
- Starting rebalancing on volume test-volume has been successful
-
-- Start the migration operation forcefully on any one of the servers
-  using the following command:
-
-    `# gluster volume rebalance <VOLNAME> start force`
-
- For example:
-
- # gluster volume rebalance test-volume start force
- Starting rebalancing on volume test-volume has been successful
-
-###Displaying Status of Rebalance Operation
-
-You can display the status information about rebalance volume operation,
-as needed.
-
-- Check the status of the rebalance operation, using the following
- command:
-
-    `# gluster volume rebalance <VOLNAME> status`
-
- For example:
-
- # gluster volume rebalance test-volume status
- Node Rebalanced-files size scanned status
- --------- ---------------- ---- ------- -----------
- 617c923e-6450-4065-8e33-865e28d9428f 416 1463 312 in progress
-
- The time to complete the rebalance operation depends on the number
- of files on the volume along with the corresponding file sizes.
- Continue checking the rebalance status, verifying that the number of
- files rebalanced or total files scanned keeps increasing.
-
- For example, running the status command again might display a result
- similar to the following:
-
- # gluster volume rebalance test-volume status
- Node Rebalanced-files size scanned status
- --------- ---------------- ---- ------- -----------
- 617c923e-6450-4065-8e33-865e28d9428f 498 1783 378 in progress
-
- The rebalance status displays the following when the rebalance is
- complete:
-
- # gluster volume rebalance test-volume status
- Node Rebalanced-files size scanned status
- --------- ---------------- ---- ------- -----------
- 617c923e-6450-4065-8e33-865e28d9428f 502 1873 334 completed
-
-###Stopping Rebalance Operation
-
-You can stop the rebalance operation, as needed.
-
-- Stop the rebalance operation using the following command:
-
-    `# gluster volume rebalance <VOLNAME> stop`
-
- For example:
-
- # gluster volume rebalance test-volume stop
- Node Rebalanced-files size scanned status
- --------- ---------------- ---- ------- -----------
- 617c923e-6450-4065-8e33-865e28d9428f 59 590 244 stopped
- Stopped rebalance process on volume test-volume
-
-<a name="stopping-volumes" />
-##Stopping Volumes
-
-1. Stop the volume using the following command:
-
-    `# gluster volume stop <VOLNAME>`
-
- For example, to stop test-volume:
-
- # gluster volume stop test-volume
- Stopping volume will make its data inaccessible. Do you want to continue? (y/n)
-
-2. Enter `y` to confirm the operation. The output of the command
- displays the following:
-
- Stopping volume test-volume has been successful
-
-<a name="deleting-volumes" />
-##Deleting Volumes
-
-1. Delete the volume using the following command:
-
-    `# gluster volume delete <VOLNAME>`
-
- For example, to delete test-volume:
-
- # gluster volume delete test-volume
- Deleting volume will erase all information about the volume. Do you want to continue? (y/n)
-
-2. Enter `y` to confirm the operation. The command displays the
- following:
-
- Deleting volume test-volume has been successful
-
-<a name="triggering-self-heal-on-replicate" />
-##Triggering Self-Heal on Replicate
-
-In the replicate module, you previously had to trigger a self-heal
-manually when a brick went offline and came back online, to bring all
-the replicas back in sync. Now the pro-active self-heal daemon runs in
-the background, diagnoses issues and automatically initiates
-self-healing every 10 minutes on the files which require *healing*.
-
-You can view the list of files that need *healing*, the list of files
-which are currently/previously *healed*, list of files which are in
-split-brain state, and you can manually trigger self-heal on the entire
-volume or only on the files which need *healing*.
-
-- Trigger self-heal only on the files which require *healing*:
-
-    `# gluster volume heal <VOLNAME>`
-
- For example, to trigger self-heal on files which requires *healing*
- of test-volume:
-
- # gluster volume heal test-volume
- Heal operation on volume test-volume has been successful
-
-- Trigger self-heal on all the files of a volume:
-
-    `# gluster volume heal <VOLNAME> full`
-
-    For example, to trigger self-heal on all the files of
-    test-volume:
-
- # gluster volume heal test-volume full
- Heal operation on volume test-volume has been successful
-
-- View the list of files that need *healing*:
-
-    `# gluster volume heal <VOLNAME> info`
-
-    For example, to view the list of files on test-volume that need
-    *healing*:
-
- # gluster volume heal test-volume info
- Brick :/gfs/test-volume_0
- Number of entries: 0
-
- Brick :/gfs/test-volume_1
- Number of entries: 101
- /95.txt
- /32.txt
- /66.txt
- /35.txt
- /18.txt
- /26.txt
- /47.txt
- /55.txt
- /85.txt
- ...
-
-- View the list of files that are self-healed:
-
-    `# gluster volume heal <VOLNAME> info healed`
-
- For example, to view the list of files on test-volume that are
- self-healed:
-
- # gluster volume heal test-volume info healed
- Brick :/gfs/test-volume_0
- Number of entries: 0
-
- Brick :/gfs/test-volume_1
- Number of entries: 69
- /99.txt
- /93.txt
- /76.txt
- /11.txt
- /27.txt
- /64.txt
- /80.txt
- /19.txt
- /41.txt
- /29.txt
- /37.txt
- /46.txt
- ...
-
-- View the list of files of a particular volume on which the self-heal
-    failed:
-
-    `# gluster volume heal <VOLNAME> info failed`
-
- For example, to view the list of files of test-volume that are not
- self-healed:
-
- # gluster volume heal test-volume info failed
- Brick :/gfs/test-volume_0
- Number of entries: 0
-
- Brick server2:/gfs/test-volume_3
- Number of entries: 72
- /90.txt
- /95.txt
- /77.txt
- /71.txt
- /87.txt
- /24.txt
- ...
-
-- View the list of files of a particular volume which are in
-    split-brain state:
-
-    `# gluster volume heal <VOLNAME> info split-brain`
-
- For example, to view the list of files of test-volume which are in
- split-brain state:
-
- # gluster volume heal test-volume info split-brain
- Brick server1:/gfs/test-volume_2
- Number of entries: 12
- /83.txt
- /28.txt
- /69.txt
- ...
-
- Brick :/gfs/test-volume_2
- Number of entries: 12
- /83.txt
- /28.txt
- /69.txt
- ...
-
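-As a quick sketch, a routine health check after a brick outage might combine
-the commands above as follows (test-volume is used as the example volume
-name):
-
-    # gluster volume heal test-volume info
-    # gluster volume heal test-volume info split-brain
-    # gluster volume heal test-volume
-
-The first two commands only report state; the last one asks the self-heal
-daemon to start healing the pending entries instead of waiting for its next
-10 minute cycle.
-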
-<a name="non-uniform-file-allocation" />
-##Non Uniform File Allocation
-
-NUFA translator or Non Uniform File Access translator is designed for giving higher preference
-to a local drive when used in an HPC type of environment. It can be applied to Distribute and Replica translators;
-in the latter case it ensures that *one* copy is local if space permits.
-
-When a client on a server creates files, the files are allocated to a brick in the volume based on the file name.
-This allocation may not be ideal, as there is higher latency and unnecessary network traffic for read/write operations
-to a non-local brick or export directory. NUFA ensures that the files are created in the local export directory
-of the server, and as a result, reduces latency and conserves bandwidth for that server accessing that file.
-This can also be useful for applications running on mount points on the storage server.
-
-If the local brick runs out of space or reaches the minimum disk free limit, instead of allocating files
-to the local brick, NUFA distributes files to other bricks in the same volume if there is
-space available on those bricks.
-
-NUFA should be enabled before creating any data in the volume.
-
-Use the following command to enable NUFA:
-
- # gluster volume set VOLNAME cluster.nufa enable on
-
-**Important**
-
-NUFA is supported under the following conditions:
-
-- Volumes with only one brick per server.
-- For use with a FUSE client. NUFA is not supported with NFS or SMB.
-- A client that is mounting a NUFA-enabled volume must be present within the trusted storage pool.
-
-The NUFA scheduler also exists, for use with the Unify translator; see below.
-
- volume bricks
- type cluster/nufa
- option local-volume-name brick1
- subvolumes brick1 brick2 brick3 brick4 brick5 brick6 brick7
- end-volume
-
-#####NUFA additional options
-
-- lookup-unhashed
-
- This is an advanced option where files are looked up in all subvolumes if they are missing on the subvolume matching the hash value of the filename. The default is on.
-
-- local-volume-name
-
- The volume name to consider local and prefer file creations on. The default is to search for a volume matching the hostname of the system.
-
-- subvolumes
-
- This option lists the subvolumes that are part of this 'cluster/nufa' volume. This translator requires more than one subvolume.
-
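-As an end-to-end sketch, enabling NUFA on a fresh volume (before any data is
-written to it) might look like the following; the hostnames, brick paths and
-mount point are illustrative only:
-
-    # gluster volume create test-volume server1:/exp1 server2:/exp2
-    # gluster volume set test-volume cluster.nufa enable on
-    # gluster volume start test-volume
-    # mount -t glusterfs server1:/test-volume /mnt/glusterfs
-
-The mount must be done with the FUSE client from a machine inside the trusted
-storage pool, as noted above.
-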
-<a name="bitrot-detection" />
-##BitRot Detection
-
-With BitRot detection in Gluster, it's possible to identify an "insidious" type of disk
-error where data is silently corrupted with no indication from the disk to the storage
-software layer that an error has occurred. This also helps in catching "backend" tinkering
-with bricks (where data is directly manipulated on the bricks without going through FUSE,
-NFS or any other access protocol).
-
-BitRot detection is disabled by default and needs to be enabled to make use of the other
-sub-commands.
-
-1. To enable bitrot detection for a given volume <VOLNAME>:
-
- `# gluster volume bitrot <VOLNAME> enable`
-
- and similarly to disable bitrot use:
-
- `# gluster volume bitrot <VOLNAME> disable`
-
-NOTE: Enabling bitrot spawns the Signer & Scrubber daemons per node. The Signer is responsible
-      for signing an object (calculating a checksum for each file) and the Scrubber verifies the
-      calculated checksum against the object's data.
-
-2. The Scrubber daemon has three (3) throttling modes that adjust the rate at which objects
-   are verified.
-
-    `# gluster volume bitrot <VOLNAME> scrub-throttle lazy`
-
-    `# gluster volume bitrot <VOLNAME> scrub-throttle normal`
-
-    `# gluster volume bitrot <VOLNAME> scrub-throttle aggressive`
-
-3. By default the scrubber scrubs the filesystem biweekly. It's possible to tune it to scrub
-   based on a predefined frequency such as monthly, etc. This can be done as shown below:
-
-    `# gluster volume bitrot <VOLNAME> scrub-frequency daily`
-
-    `# gluster volume bitrot <VOLNAME> scrub-frequency weekly`
-
-    `# gluster volume bitrot <VOLNAME> scrub-frequency biweekly`
-
-    `# gluster volume bitrot <VOLNAME> scrub-frequency monthly`
-
-NOTE: Daily scrubbing will not be available with the GA release.
-
-4. The Scrubber daemon can be paused and later resumed when required. This can be done as
-   shown below:
-
-    `# gluster volume bitrot <VOLNAME> scrub pause`
-
-   and to resume scrubbing:
-
-    `# gluster volume bitrot <VOLNAME> scrub resume`
-
-NOTE: Signing cannot be paused (and resumed) and would always be active as long as
- bitrot is enabled for that particular volume.
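-
-Taken together, a minimal bitrot setup for a volume might look like the
-following sketch (test-volume is an example volume name):
-
-    # gluster volume bitrot test-volume enable
-    # gluster volume bitrot test-volume scrub-throttle lazy
-    # gluster volume bitrot test-volume scrub-frequency monthly
-
-Scrubbing can later be paused during peak hours with
-`# gluster volume bitrot test-volume scrub pause` and resumed with
-`scrub resume`.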
diff --git a/doc/admin-guide/en-US/markdown/admin_monitoring_workload.md b/doc/admin-guide/en-US/markdown/admin_monitoring_workload.md
deleted file mode 100644
index c3ac0609b99..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_monitoring_workload.md
+++ /dev/null
@@ -1,893 +0,0 @@
-#Monitoring your GlusterFS Workload
-
-You can monitor the GlusterFS volumes on different parameters.
-Monitoring volumes helps in capacity planning and performance tuning
-tasks of the GlusterFS volume. Using this information, you can identify
-and troubleshoot issues.
-
-You can use Volume Top and Profile commands to view the performance and
-identify bottlenecks/hotspots of each brick of a volume. This helps
-system administrators to get vital performance information whenever
-performance needs to be probed.
-
-You can also perform statedump of the brick processes and nfs server
-process of a volume, and also view volume status and volume information.
-
-##Running GlusterFS Volume Profile Command
-
-GlusterFS Volume Profile command provides an interface to get the
-per-brick I/O information for each File Operation (FOP) of a volume. The
-per brick information helps in identifying bottlenecks in the storage
-system.
-
-This section describes how to run GlusterFS Volume Profile command by
-performing the following operations:
-
-- [Start Profiling](#start-profiling)
-- [Displaying the I/O Information](#displaying-io)
-- [Stop Profiling](#stop-profiling)
-
-<a name="start-profiling" />
-###Start Profiling
-
-You must start the Profiling to view the File Operation information for
-each brick.
-
-To start profiling, use the following command:
-
-`# gluster volume profile <VOLNAME> start`
-
-For example, to start profiling on test-volume:
-
- # gluster volume profile test-volume start
- Profiling started on test-volume
-
-When profiling on the volume is started, the following additional
-options are displayed in the Volume Info:
-
- diagnostics.count-fop-hits: on
- diagnostics.latency-measurement: on
-
-<a name="displaying-io" />
-###Displaying the I/O Information
-
-You can view the I/O information of each brick by using the following command:
-
-`# gluster volume profile <VOLNAME> info`
-
-For example, to see the I/O information on test-volume:
-
- # gluster volume profile test-volume info
- Brick: Test:/export/2
- Cumulative Stats:
-
- Block 1b+ 32b+ 64b+
- Size:
- Read: 0 0 0
- Write: 908 28 8
-
- Block 128b+ 256b+ 512b+
- Size:
- Read: 0 6 4
- Write: 5 23 16
-
- Block 1024b+ 2048b+ 4096b+
- Size:
- Read: 0 52 17
- Write: 15 120 846
-
- Block 8192b+ 16384b+ 32768b+
- Size:
- Read: 52 8 34
- Write: 234 134 286
-
- Block 65536b+ 131072b+
- Size:
- Read: 118 622
- Write: 1341 594
-
-
- %-latency Avg- Min- Max- calls Fop
- latency Latency Latency
- ___________________________________________________________
- 4.82 1132.28 21.00 800970.00 4575 WRITE
- 5.70 156.47 9.00 665085.00 39163 READDIRP
- 11.35 315.02 9.00 1433947.00 38698 LOOKUP
- 11.88 1729.34 21.00 2569638.00 7382 FXATTROP
- 47.35 104235.02 2485.00 7789367.00 488 FSYNC
-
- ------------------
-
- ------------------
-
- Duration : 335
-
- BytesRead : 94505058
-
- BytesWritten : 195571980
-
-<a name="stop-profiling" />
-###Stop Profiling
-
-You can stop profiling the volume, if you do not need profiling
-information anymore.
-
-Stop profiling using the following command:
-
-    `# gluster volume profile <VOLNAME> stop`
-
-For example, to stop profiling on test-volume:
-
-    `# gluster volume profile test-volume stop`
-
-    `Profiling stopped on test-volume`
-
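-In practice the three profiling steps are usually combined into one short
-session, sketched below with test-volume as the example volume:
-
-    # gluster volume profile test-volume start
-    ... run the workload you want to measure ...
-    # gluster volume profile test-volume info
-    # gluster volume profile test-volume stop
-
-Because the info output reports cumulative statistics, starting profiling
-just before the workload and stopping it right after keeps the numbers easy
-to interpret.
-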
-##Running GlusterFS Volume TOP Command
-
-GlusterFS Volume Top command allows you to view the glusterfs bricks’
-performance metrics like read, write, file open calls, file read calls,
-file write calls, directory open calls, and directory read calls. The
-top command displays up to 100 results.
-
-This section describes how to run and view the results for the following
-GlusterFS Top commands:
-
-- [Viewing Open fd Count and Maximum fd Count](#open-fd-count)
-- [Viewing Highest File Read Calls](#file-read)
-- [Viewing Highest File Write Calls](#file-write)
-- [Viewing Highest Open Calls on Directories](#open-dir)
-- [Viewing Highest Read Calls on Directory](#read-dir)
-- [Viewing List of Read Performance on each Brick](#read-perf)
-- [Viewing List of Write Performance on each Brick](#write-perf)
-
-<a name="open-fd-count" />
-###Viewing Open fd Count and Maximum fd Count
-
-You can view both the current open fd count (the list of files that are
-currently the most opened, along with their open counts) on the brick,
-and the maximum open fd count (the maximum number of files open at any
-given point of time since the servers have been up and running). If the
-brick name is not specified, then the open fd metrics of all the bricks
-belonging to the volume will be displayed.
-
-- View open fd count and maximum fd count using the following command:
-
-    `# gluster volume top <VOLNAME> open [brick <BRICK-NAME>] [list-cnt <cnt>]`
-
-    For example, to view open fd count and maximum fd count on brick
-    server:/export of test-volume and list the top 10 open calls:
-
-    `# gluster volume top test-volume open brick server:/export list-cnt 10`
-
-    `Brick: server:/export/dir1 `
-
-    `Current open fd's: 34 Max open fd's: 209 `
-
- ==========Open file stats========
-
- open file name
- call count
-
- 2 /clients/client0/~dmtmp/PARADOX/
- COURSES.DB
-
- 11 /clients/client0/~dmtmp/PARADOX/
- ENROLL.DB
-
- 11 /clients/client0/~dmtmp/PARADOX/
- STUDENTS.DB
-
- 10 /clients/client0/~dmtmp/PWRPNT/
- TIPS.PPT
-
- 10 /clients/client0/~dmtmp/PWRPNT/
- PCBENCHM.PPT
-
- 9 /clients/client7/~dmtmp/PARADOX/
- STUDENTS.DB
-
- 9 /clients/client1/~dmtmp/PARADOX/
- STUDENTS.DB
-
- 9 /clients/client2/~dmtmp/PARADOX/
- STUDENTS.DB
-
- 9 /clients/client0/~dmtmp/PARADOX/
- STUDENTS.DB
-
- 9 /clients/client8/~dmtmp/PARADOX/
- STUDENTS.DB
-
-<a name="file-read" />
-###Viewing Highest File Read Calls
-
-You can view the highest read calls on each brick. If the brick name is not
-specified, then by default a list of 100 files will be displayed.
-
-- View highest file Read calls using the following command:
-
-    `# gluster volume top <VOLNAME> read [brick <BRICK-NAME>] [list-cnt <cnt>]`
-
-    For example, to view the highest Read calls on brick server:/export of
-    test-volume:
-
-    `# gluster volume top test-volume read brick server:/export list-cnt 10`
-
- `Brick:` server:/export/dir1
-
- ==========Read file stats========
-
- read filename
- call count
-
- 116 /clients/client0/~dmtmp/SEED/LARGE.FIL
-
- 64 /clients/client0/~dmtmp/SEED/MEDIUM.FIL
-
- 54 /clients/client2/~dmtmp/SEED/LARGE.FIL
-
- 54 /clients/client6/~dmtmp/SEED/LARGE.FIL
-
- 54 /clients/client5/~dmtmp/SEED/LARGE.FIL
-
- 54 /clients/client0/~dmtmp/SEED/LARGE.FIL
-
- 54 /clients/client3/~dmtmp/SEED/LARGE.FIL
-
- 54 /clients/client4/~dmtmp/SEED/LARGE.FIL
-
- 54 /clients/client9/~dmtmp/SEED/LARGE.FIL
-
- 54 /clients/client8/~dmtmp/SEED/LARGE.FIL
-
-<a name="file-write" />
-###Viewing Highest File Write Calls
-
-You can view the list of files which have the highest file write calls on
-each brick. If the brick name is not specified, then by default a list of
-100 files will be displayed.
-
-- View highest file Write calls using the following command:
-
-    `# gluster volume top <VOLNAME> write [brick <BRICK-NAME>] [list-cnt <cnt>]`
-
-    For example, to view the highest Write calls on brick server:/export of
-    test-volume:
-
-    `# gluster volume top test-volume write brick server:/export list-cnt 10`
-
- `Brick: server:/export/dir1 `
-
- ==========Write file stats========
- write call count filename
-
- 83 /clients/client0/~dmtmp/SEED/LARGE.FIL
-
- 59 /clients/client7/~dmtmp/SEED/LARGE.FIL
-
- 59 /clients/client1/~dmtmp/SEED/LARGE.FIL
-
- 59 /clients/client2/~dmtmp/SEED/LARGE.FIL
-
- 59 /clients/client0/~dmtmp/SEED/LARGE.FIL
-
- 59 /clients/client8/~dmtmp/SEED/LARGE.FIL
-
- 59 /clients/client5/~dmtmp/SEED/LARGE.FIL
-
- 59 /clients/client4/~dmtmp/SEED/LARGE.FIL
-
- 59 /clients/client6/~dmtmp/SEED/LARGE.FIL
-
- 59 /clients/client3/~dmtmp/SEED/LARGE.FIL
-
-<a name="open-dir" />
-###Viewing Highest Open Calls on Directories
-
-You can view the list of directories which have the highest open calls on
-each brick. If the brick name is not specified, then the metrics of all
-the bricks belonging to that volume will be displayed.
-
-- View the list of open calls on each directory using the following
-    command:
-
-    `# gluster volume top <VOLNAME> opendir [brick <BRICK-NAME>] [list-cnt <cnt>]`
-
-    For example, to view open calls on brick server:/export/ of
-    test-volume:
-
-    `# gluster volume top test-volume opendir brick server:/export list-cnt 10`
-
- `Brick: server:/export/dir1 `
-
- ==========Directory open stats========
-
- Opendir count directory name
-
- 1001 /clients/client0/~dmtmp
-
- 454 /clients/client8/~dmtmp
-
- 454 /clients/client2/~dmtmp
-
- 454 /clients/client6/~dmtmp
-
- 454 /clients/client5/~dmtmp
-
- 454 /clients/client9/~dmtmp
-
- 443 /clients/client0/~dmtmp/PARADOX
-
- 408 /clients/client1/~dmtmp
-
- 408 /clients/client7/~dmtmp
-
- 402 /clients/client4/~dmtmp
-
-<a name="read-dir" />
-###Viewing Highest Read Calls on Directory
-
-You can view the list of directories which have the highest directory read
-calls on each brick. If the brick name is not specified, then the metrics of
-all the bricks belonging to that volume will be displayed.
-
-- View the list of highest directory read calls on each brick using the
-    following command:
-
-    `# gluster volume top <VOLNAME> readdir [brick <BRICK-NAME>] [list-cnt <cnt>]`
-
-    For example, to view the highest directory read calls on brick
-    server:/export of test-volume:
-
-    `# gluster volume top test-volume readdir brick server:/export list-cnt 10`
-
- `Brick: `
-
- ==========Directory readdirp stats========
-
- readdirp count directory name
-
- 1996 /clients/client0/~dmtmp
-
- 1083 /clients/client0/~dmtmp/PARADOX
-
- 904 /clients/client8/~dmtmp
-
- 904 /clients/client2/~dmtmp
-
- 904 /clients/client6/~dmtmp
-
- 904 /clients/client5/~dmtmp
-
- 904 /clients/client9/~dmtmp
-
- 812 /clients/client1/~dmtmp
-
- 812 /clients/client7/~dmtmp
-
- 800 /clients/client4/~dmtmp
-
-<a name="read-perf" />
-###Viewing List of Read Performance on each Brick
-
-You can view the read throughput of files on each brick. If brick name
-is not specified, then the metrics of all the bricks belonging to that
-volume will be displayed. The output will be the read throughput.
-
- ==========Read throughput file stats========
-
- read filename Time
- through
- put(MBp
- s)
-
- 2570.00 /clients/client0/~dmtmp/PWRPNT/ -2011-01-31
- TRIDOTS.POT 15:38:36.894610
- 2570.00 /clients/client0/~dmtmp/PWRPNT/ -2011-01-31
- PCBENCHM.PPT 15:38:39.815310
- 2383.00 /clients/client2/~dmtmp/SEED/ -2011-01-31
- MEDIUM.FIL 15:52:53.631499
-
- 2340.00 /clients/client0/~dmtmp/SEED/ -2011-01-31
- MEDIUM.FIL 15:38:36.926198
-
- 2299.00 /clients/client0/~dmtmp/SEED/ -2011-01-31
- LARGE.FIL 15:38:36.930445
-
- 2259.00 /clients/client0/~dmtmp/PARADOX/ -2011-01-31
- COURSES.X04 15:38:40.549919
-
- 2221.00 /clients/client0/~dmtmp/PARADOX/ -2011-01-31
- STUDENTS.VAL 15:52:53.298766
-
- 2221.00 /clients/client3/~dmtmp/SEED/ -2011-01-31
- COURSES.DB 15:39:11.776780
-
- 2184.00 /clients/client3/~dmtmp/SEED/ -2011-01-31
- MEDIUM.FIL 15:39:10.251764
-
- 2184.00 /clients/client5/~dmtmp/WORD/ -2011-01-31
- BASEMACH.DOC 15:39:09.336572
-
-This command will initiate a dd for the specified count and block size
-and measure the corresponding throughput.
-
-- View the list of read performance on each brick using the following
-    command:
-
-    `# gluster volume top <VOLNAME> read-perf [bs <size> count <count>] [brick <BRICK-NAME>] [list-cnt <cnt>]`
-
-    For example, to view the read performance on brick server:/export/ of
-    test-volume, with a 256 block size, a count of 1, and a list count of 10:
-
-    `# gluster volume top test-volume read-perf bs 256 count 1 brick server:/export/ list-cnt 10`
-
- `Brick: server:/export/dir1 256 bytes (256 B) copied, Throughput: 4.1 MB/s `
-
- ==========Read throughput file stats========
-
- read filename Time
- through
- put(MBp
- s)
-
- 2912.00 /clients/client0/~dmtmp/PWRPNT/ -2011-01-31
- TRIDOTS.POT 15:38:36.896486
-
- 2570.00 /clients/client0/~dmtmp/PWRPNT/ -2011-01-31
- PCBENCHM.PPT 15:38:39.815310
-
- 2383.00 /clients/client2/~dmtmp/SEED/ -2011-01-31
- MEDIUM.FIL 15:52:53.631499
-
- 2340.00 /clients/client0/~dmtmp/SEED/ -2011-01-31
- MEDIUM.FIL 15:38:36.926198
-
- 2299.00 /clients/client0/~dmtmp/SEED/ -2011-01-31
- LARGE.FIL 15:38:36.930445
-
- 2259.00 /clients/client0/~dmtmp/PARADOX/ -2011-01-31
- COURSES.X04 15:38:40.549919
-
- 2221.00 /clients/client9/~dmtmp/PARADOX/ -2011-01-31
- STUDENTS.VAL 15:52:53.298766
-
- 2221.00 /clients/client8/~dmtmp/PARADOX/ -2011-01-31
- COURSES.DB 15:39:11.776780
-
- 2184.00 /clients/client3/~dmtmp/SEED/ -2011-01-31
- MEDIUM.FIL 15:39:10.251764
-
- 2184.00 /clients/client5/~dmtmp/WORD/ -2011-01-31
- BASEMACH.DOC 15:39:09.336572
-
-<a name="write-perf" />
-###Viewing List of Write Performance on each Brick
-
-You can view the list of write throughput of files on each brick. If the
-brick name is not specified, then the metrics of all the bricks belonging to
-that volume will be displayed. The output will be the write throughput.
-
-This command will initiate a dd for the specified count and block size
-and measure the corresponding throughput. To view the list of write
-performance on each brick:
-
-- View the list of write performance on each brick using the following
-    command:
-
-    `# gluster volume top <VOLNAME> write-perf [bs <size> count <count>] [brick <BRICK-NAME>] [list-cnt <cnt>]`
-
-    For example, to view the write performance on brick server:/export/ of
-    test-volume, with a 256 block size, a count of 1, and a list count of 10:
-
-    `# gluster volume top test-volume write-perf bs 256 count 1 brick server:/export/ list-cnt 10`
-
-    `Brick: server:/export/dir1`
-
-    `256 bytes (256 B) copied, Throughput: 2.8 MB/s `
-
- ==========Write throughput file stats========
-
- write filename Time
- throughput
- (MBps)
-
- 1170.00 /clients/client0/~dmtmp/SEED/ -2011-01-31
- SMALL.FIL 15:39:09.171494
-
- 1008.00 /clients/client6/~dmtmp/SEED/ -2011-01-31
- LARGE.FIL 15:39:09.73189
-
- 949.00 /clients/client0/~dmtmp/SEED/ -2011-01-31
- MEDIUM.FIL 15:38:36.927426
-
- 936.00 /clients/client0/~dmtmp/SEED/ -2011-01-31
- LARGE.FIL 15:38:36.933177
- 897.00 /clients/client5/~dmtmp/SEED/ -2011-01-31
- MEDIUM.FIL 15:39:09.33628
-
- 897.00 /clients/client6/~dmtmp/SEED/ -2011-01-31
- MEDIUM.FIL 15:39:09.27713
-
- 885.00 /clients/client0/~dmtmp/SEED/ -2011-01-31
- SMALL.FIL 15:38:36.924271
-
- 528.00 /clients/client5/~dmtmp/SEED/ -2011-01-31
- LARGE.FIL 15:39:09.81893
-
- 516.00 /clients/client6/~dmtmp/ACCESS/ -2011-01-31
- FASTENER.MDB 15:39:01.797317
-
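-As a compact sketch, the Top sub-commands above can be combined into a quick
-per-brick inspection, again using test-volume and the brick server:/export as
-illustrative names:
-
-    # gluster volume top test-volume open brick server:/export list-cnt 10
-    # gluster volume top test-volume read brick server:/export list-cnt 10
-    # gluster volume top test-volume read-perf bs 256 count 1 brick server:/export list-cnt 10
-
-Dropping the `brick` argument repeats the same query across every brick in
-the volume.
-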
-##Displaying Volume Information
-
-You can display information about a specific volume, or all volumes, as
-needed.
-
-- Display information about a specific volume using the following
- command:
-
-    `# gluster volume info VOLNAME`
-
- For example, to display information about test-volume:
-
- # gluster volume info test-volume
- Volume Name: test-volume
- Type: Distribute
- Status: Created
- Number of Bricks: 4
- Bricks:
- Brick1: server1:/exp1
- Brick2: server2:/exp2
- Brick3: server3:/exp3
- Brick4: server4:/exp4
-
-- Display information about all volumes using the following command:
-
- `# gluster volume info all`
-
- # gluster volume info all
-
- Volume Name: test-volume
- Type: Distribute
- Status: Created
- Number of Bricks: 4
- Bricks:
- Brick1: server1:/exp1
- Brick2: server2:/exp2
- Brick3: server3:/exp3
- Brick4: server4:/exp4
-
- Volume Name: mirror
- Type: Distributed-Replicate
- Status: Started
- Number of Bricks: 2 X 2 = 4
- Bricks:
- Brick1: server1:/brick1
- Brick2: server2:/brick2
- Brick3: server3:/brick3
- Brick4: server4:/brick4
-
- Volume Name: Vol
- Type: Distribute
- Status: Started
- Number of Bricks: 1
- Bricks:
- Brick: server:/brick6
-
-##Performing Statedump on a Volume
-
-Statedump is a mechanism through which you can get details of all
-internal variables and state of the glusterfs process at the time of
-issuing the command. You can perform statedumps of the brick processes
-and nfs server process of a volume using the statedump command. The
-following options can be used to determine what information is to be
-dumped:
-
-- **mem** - Dumps the memory usage and memory pool details of the
- bricks.
-
-- **iobuf** - Dumps iobuf details of the bricks.
-
-- **priv** - Dumps private information of loaded translators.
-
-- **callpool** - Dumps the pending calls of the volume.
-
-- **fd** - Dumps the open fd tables of the volume.
-
-- **inode** - Dumps the inode tables of the volume.
-
-**To display volume statedump**
-
-- Display statedump of a volume or NFS server using the following
-    command:
-
-    `# gluster volume statedump <VOLNAME> [nfs] [all|mem|iobuf|callpool|priv|fd|inode]`
-
- For example, to display statedump of test-volume:
-
- # gluster volume statedump test-volume
- Volume statedump successful
-
- The statedump files are created on the brick servers in the` /tmp`
- directory or in the directory set using `server.statedump-path`
- volume option. The naming convention of the dump file is
- `<brick-path>.<brick-pid>.dump`.
-
-- By default, the output of the statedump is stored in the
-    `/tmp/<brickname.PID.dump>` file on that particular server. Change
-    the directory of the statedump file using the following command:
-
-    `# gluster volume set <VOLNAME> server.statedump-path <path>`
-
- For example, to change the location of the statedump file of
- test-volume:
-
- # gluster volume set test-volume server.statedump-path /usr/local/var/log/glusterfs/dumps/
- Set volume successful
-
- You can view the changed path of the statedump file using the
- following command:
-
-    `# gluster volume info <VOLNAME>`
-
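-A short statedump session might therefore look like the sketch below; the
-dump directory is the example path set above:
-
-    # gluster volume set test-volume server.statedump-path /usr/local/var/log/glusterfs/dumps/
-    # gluster volume statedump test-volume all
-    # ls /usr/local/var/log/glusterfs/dumps/
-
-Each brick process of test-volume writes its own
-`<brick-path>.<brick-pid>.dump` file into that directory.
-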
-##Displaying Volume Status
-
-You can display the status information about a specific volume, brick or
-all volumes, as needed. Status information can be used to understand the
-current status of the brick, nfs processes, and overall file system.
-Status information can also be used to monitor and debug the volume
-information. You can view status of the volume along with the following
-details:
-
-- **detail** - Displays additional information about the bricks.
-
-- **clients** - Displays the list of clients connected to the volume.
-
-- **mem** - Displays the memory usage and memory pool details of the
- bricks.
-
-- **inode** - Displays the inode tables of the volume.
-
-- **fd** - Displays the open fd (file descriptors) tables of the
- volume.
-
-- **callpool** - Displays the pending calls of the volume.
-
-**To display volume status**
-
-- Display information about a specific volume using the following
-    command:
-
-    `# gluster volume status [all|<VOLNAME> [<BRICKNAME>]] [detail|clients|mem|inode|fd|callpool]`
-
- For example, to display information about test-volume:
-
- # gluster volume status test-volume
- STATUS OF VOLUME: test-volume
- BRICK PORT ONLINE PID
- --------------------------------------------------------
- arch:/export/1 24009 Y 22445
- --------------------------------------------------------
- arch:/export/2 24010 Y 22450
-
-- Display information about all volumes using the following command:
-
- `# gluster volume status all`
-
- # gluster volume status all
- STATUS OF VOLUME: volume-test
- BRICK PORT ONLINE PID
- --------------------------------------------------------
- arch:/export/4 24010 Y 22455
-
- STATUS OF VOLUME: test-volume
- BRICK PORT ONLINE PID
- --------------------------------------------------------
- arch:/export/1 24009 Y 22445
- --------------------------------------------------------
- arch:/export/2 24010 Y 22450
-
-- Display additional information about the bricks using the following
-    command:
-
-    `# gluster volume status <VOLNAME> detail`
-
-    For example, to display additional information about the bricks of
-    test-volume:
-
-        # gluster volume status test-volume detail
- STATUS OF VOLUME: test-volume
- -------------------------------------------
- Brick : arch:/export/1
- Port : 24009
- Online : Y
- Pid : 16977
- File System : rootfs
- Device : rootfs
- Mount Options : rw
- Disk Space Free : 13.8GB
- Total Disk Space : 46.5GB
- Inode Size : N/A
- Inode Count : N/A
- Free Inodes : N/A
-
-- Display the list of clients accessing the volumes using the
-    following command:
-
-    `# gluster volume status <VOLNAME> clients`
-
- For example, to display the list of clients connected to
- test-volume:
-
- # gluster volume status test-volume clients
- Brick : arch:/export/1
- Clients connected : 2
- Hostname Bytes Read BytesWritten
- -------- --------- ------------
- 127.0.0.1:1013 776 676
- 127.0.0.1:1012 50440 51200
-
-- Display the memory usage and memory pool details of the bricks using
-    the following command:
-
-    `# gluster volume status <VOLNAME> mem`
-
- For example, to display the memory usage and memory pool details of
- the bricks of test-volume:
-
- Memory status for volume : test-volume
- ----------------------------------------------
- Brick : arch:/export/1
- Mallinfo
- --------
- Arena : 434176
- Ordblks : 2
- Smblks : 0
- Hblks : 12
- Hblkhd : 40861696
- Usmblks : 0
- Fsmblks : 0
- Uordblks : 332416
- Fordblks : 101760
- Keepcost : 100400
-
- Mempool Stats
- -------------
- Name HotCount ColdCount PaddedSizeof AllocCount MaxAlloc
- ---- -------- --------- ------------ ---------- --------
- test-volume-server:fd_t 0 16384 92 57 5
- test-volume-server:dentry_t 59 965 84 59 59
- test-volume-server:inode_t 60 964 148 60 60
- test-volume-server:rpcsvc_request_t 0 525 6372 351 2
- glusterfs:struct saved_frame 0 4096 124 2 2
- glusterfs:struct rpc_req 0 4096 2236 2 2
- glusterfs:rpcsvc_request_t 1 524 6372 2 1
- glusterfs:call_stub_t 0 1024 1220 288 1
- glusterfs:call_stack_t 0 8192 2084 290 2
- glusterfs:call_frame_t 0 16384 172 1728 6
-
-- Display the inode tables of the volume using the following command:
-
-    `# gluster volume status <VOLNAME> inode`
-
- For example, to display the inode tables of the test-volume:
-
- # gluster volume status test-volume inode
- inode tables for volume test-volume
- ----------------------------------------------
- Brick : arch:/export/1
- Active inodes:
- GFID Lookups Ref IA type
- ---- ------- --- -------
- 6f3fe173-e07a-4209-abb6-484091d75499 1 9 2
- 370d35d7-657e-44dc-bac4-d6dd800ec3d3 1 1 2
-
- LRU inodes:
- GFID Lookups Ref IA type
- ---- ------- --- -------
- 80f98abe-cdcf-4c1d-b917-ae564cf55763 1 0 1
- 3a58973d-d549-4ea6-9977-9aa218f233de 1 0 1
- 2ce0197d-87a9-451b-9094-9baa38121155 1 0 2
-
-- Display the open fd tables of the volume using the following
-    command:
-
-    `# gluster volume status <VOLNAME> fd`
-
- For example, to display the open fd tables of the test-volume:
-
- # gluster volume status test-volume fd
-
- FD tables for volume test-volume
- ----------------------------------------------
- Brick : arch:/export/1
- Connection 1:
- RefCount = 0 MaxFDs = 128 FirstFree = 4
- FD Entry PID RefCount Flags
- -------- --- -------- -----
- 0 26311 1 2
- 1 26310 3 2
- 2 26310 1 2
- 3 26311 3 2
-
- Connection 2:
- RefCount = 0 MaxFDs = 128 FirstFree = 0
- No open fds
-
- Connection 3:
- RefCount = 0 MaxFDs = 128 FirstFree = 0
- No open fds
-
-- Display the pending calls of the volume using the following command:
-
-    `# gluster volume status <VOLNAME> callpool`
-
- Each call has a call stack containing call frames.
-
-    For example, to display the pending calls of test-volume:
-
-        # gluster volume status test-volume callpool
-
- Pending calls for volume test-volume
- ----------------------------------------------
- Brick : arch:/export/1
- Pending calls: 2
- Call Stack1
- UID : 0
- GID : 0
- PID : 26338
- Unique : 192138
- Frames : 7
- Frame 1
- Ref Count = 1
- Translator = test-volume-server
- Completed = No
- Frame 2
- Ref Count = 0
- Translator = test-volume-posix
- Completed = No
- Parent = test-volume-access-control
- Wind From = default_fsync
- Wind To = FIRST_CHILD(this)->fops->fsync
- Frame 3
- Ref Count = 1
- Translator = test-volume-access-control
- Completed = No
- Parent = repl-locks
- Wind From = default_fsync
- Wind To = FIRST_CHILD(this)->fops->fsync
- Frame 4
- Ref Count = 1
- Translator = test-volume-locks
- Completed = No
- Parent = test-volume-io-threads
- Wind From = iot_fsync_wrapper
- Wind To = FIRST_CHILD (this)->fops->fsync
- Frame 5
- Ref Count = 1
- Translator = test-volume-io-threads
- Completed = No
- Parent = test-volume-marker
- Wind From = default_fsync
- Wind To = FIRST_CHILD(this)->fops->fsync
- Frame 6
- Ref Count = 1
- Translator = test-volume-marker
- Completed = No
- Parent = /export/1
- Wind From = io_stats_fsync
- Wind To = FIRST_CHILD(this)->fops->fsync
- Frame 7
- Ref Count = 1
- Translator = /export/1
- Completed = No
- Parent = test-volume-server
- Wind From = server_fsync_resume
- Wind To = bound_xl->fops->fsync
-
-
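-As a final sketch, a quick health check of a volume typically chains a few of
-these status queries together (test-volume is the example volume):
-
-    # gluster volume status test-volume
-    # gluster volume status test-volume detail
-    # gluster volume status test-volume clients
-
-The first command confirms that every brick is online; the other two drill
-into disk usage and connected clients when something looks wrong.
-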
diff --git a/doc/admin-guide/en-US/markdown/admin_object_storage.md b/doc/admin-guide/en-US/markdown/admin_object_storage.md
deleted file mode 100644
index 71edab64536..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_object_storage.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# SwiftOnFile
-
-The SwiftOnFile project enables a GlusterFS volume to be used as a backend for OpenStack
-Swift - a distributed object store. This allows objects PUT over Swift's RESTful
-API to be accessed as files over a filesystem interface and vice versa, i.e. files
-created over a filesystem interface (NFS/FUSE/native) can be accessed as objects
-over Swift's RESTful API.
-
-SwiftOnFile project was formerly known as `gluster-swift` and also as `UFO
-(Unified File and Object)` before that. More information about SwiftOnFile can
-be found [here](https://github.com/swiftonfile/swiftonfile/blob/master/doc/markdown/quick_start_guide.md).
-There are differences in working of gluster-swift (now obsolete) and swiftonfile
-projects. The older gluster-swift code and relevant documentation can be found
-in [icehouse branch](https://github.com/swiftonfile/swiftonfile/tree/icehouse)
-of swiftonfile repo.
-
-## SwiftOnFile vs gluster-swift
-
-| Gluster-Swift | SwiftOnFile |
-|:---:|:---:|
-| One GlusterFS volume maps to and stores only one Swift account. Mountpoint hierarchy: `container/object` | One GlusterFS volume or XFS partition can have multiple accounts. Mountpoint hierarchy: `acc/container/object` |
-| Overrides the account server, container server and object server. We need to keep in sync with upstream Swift and often may need code changes or workarounds to support new Swift features. | Implements only the object-server. Much less need to catch up with Swift, as new features at the proxy, container and account level would very likely be compatible with SwiftOnFile as it's just a storage policy. |
-| Does not use DBs for accounts and containers. A container listing involves a filesystem crawl. A HEAD on an account/container gives inaccurate or stale results without an FS crawl. | Uses Swift's DBs to store account and container information. An account or container listing does not involve an FS crawl. Accurate info on HEAD to account/container – ability to support account quotas. |
-| GET on a container and account lists actual files in the filesystem. | GET on a container and account only lists objects PUT over Swift. Files created over the filesystem interface do not appear in container and object listings. |
-| Standalone deployment required and does not integrate with an existing Swift cluster. | Integrates with any existing Swift deployment as a Storage Policy. |
-
diff --git a/doc/admin-guide/en-US/markdown/admin_puppet.md b/doc/admin-guide/en-US/markdown/admin_puppet.md
deleted file mode 100644
index 103449be0e7..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_puppet.md
+++ /dev/null
@@ -1,499 +0,0 @@
-#Puppet-Gluster
-<!---
-GlusterFS module by James
-Copyright (C) 2010-2013+ James Shubin
-Written by James Shubin <james@shubin.ca>
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see <http://www.gnu.org/licenses/>.
--->
-##A GlusterFS Puppet module by [James](https://ttboj.wordpress.com/)
-####Available from:
-####[https://github.com/purpleidea/puppet-gluster/](https://github.com/purpleidea/puppet-gluster/)
-
-####Also available from:
-####[https://forge.gluster.org/puppet-gluster/](https://forge.gluster.org/puppet-gluster/)
-
-####Table of Contents
-
-1. [Overview](#overview)
-2. [Module description - What the module does](#module-description)
-3. [Setup - Getting started with Puppet-Gluster](#setup)
- * [What can Puppet-Gluster manage?](#what-can-puppet-gluster-manage)
- * [Simple setup](#simple-setup)
- * [Elastic setup](#elastic-setup)
- * [Advanced setup](#advanced-setup)
-4. [Usage/FAQ - Notes on management and frequently asked questions](#usage-and-frequently-asked-questions)
-5. [Reference - Class and type reference](#reference)
- * [gluster::simple](#glustersimple)
- * [gluster::elastic](#glusterelastic)
- * [gluster::server](#glusterserver)
- * [gluster::host](#glusterhost)
- * [gluster::brick](#glusterbrick)
- * [gluster::volume](#glustervolume)
- * [gluster::volume::property](#glustervolumeproperty)
-6. [Examples - Example configurations](#examples)
-7. [Limitations - Puppet versions, OS compatibility, etc...](#limitations)
-8. [Development - Background on module development](#development)
-9. [Author - Author and contact information](#author)
-
-##Overview
-
-The Puppet-Gluster module installs, configures, and manages a GlusterFS cluster.
-
-##Module Description
-
-This Puppet-Gluster module handles installation, configuration, and management
-of GlusterFS across all of the hosts in the cluster.
-
-##Setup
-
-###What can Puppet-Gluster manage?
-
-Puppet-Gluster is designed to be able to manage as much or as little of your
-GlusterFS cluster as you wish. All features are optional. If there is a feature
-that doesn't appear to be optional, and you believe it should be, please let me
-know. Having said that, it makes good sense to me to have Puppet-Gluster manage
-as much of your GlusterFS infrastructure as it can. At the moment, it cannot
-rack new servers, but I am accepting funding to explore this feature ;) At the
-moment it can manage:
-
-* GlusterFS packages (rpm)
-* GlusterFS configuration files (/var/lib/glusterd/)
-* GlusterFS host peering (gluster peer probe)
-* GlusterFS storage partitioning (fdisk)
-* GlusterFS storage formatting (mkfs)
-* GlusterFS brick creation (mkdir)
-* GlusterFS services (glusterd)
-* GlusterFS firewalling (whitelisting)
-* GlusterFS volume creation (gluster volume create)
-* GlusterFS volume state (started/stopped)
-* GlusterFS volume properties (gluster volume set)
-* And much more...
-
-###Simple setup
-
-include '::gluster::simple' is enough to get you up and running. When using the
-gluster::simple class, or with any other Puppet-Gluster configuration,
-identical definitions must be used on all hosts in the cluster. The simplest
-way to accomplish this is with a single shared puppet host definition like:
-
-```puppet
-node /^annex\d+$/ { # annex{1,2,..N}
- class { '::gluster::simple':
- }
-}
-```
-
-If you wish to pass in different parameters, you can specify them in the class
-before you provision your hosts:
-
-```puppet
-class { '::gluster::simple':
- replica => 2,
- volume => ['volume1', 'volume2', 'volumeN'],
-}
-```
-
-###Elastic setup
-
-The gluster::elastic class is not yet available. Stay tuned!
-
-###Advanced setup
-
-Some system administrators may wish to manually itemize each of the required
-components for the Puppet-Gluster deployment. This happens automatically with
-the higher level modules, but may still be a desirable feature, particularly
-for non-elastic storage pools where the configuration isn't expected to change
-very often (if ever).
-
-To put together your cluster piece by piece, you must manually include and
-define each class and type that you wish to use. If there are certain aspects
-that you wish to manage yourself, you can omit them from your configuration.
-See the [reference](#reference) section below for the specifics. Here is one
-possible example:
-
-```puppet
-class { '::gluster::server':
- shorewall => true,
-}
-
-gluster::host { 'annex1.example.com':
- # use uuidgen to make these
- uuid => '1f660ca2-2c78-4aa0-8f4d-21608218c69c',
-}
-
-# note that this is using a folder on your existing file system...
-# this can be useful for prototyping gluster using virtual machines
-# if this isn't a separate partition, remember that your root fs will
-# run out of space when your gluster volume does!
-gluster::brick { 'annex1.example.com:/data/gluster-storage1':
- areyousure => true,
-}
-
-gluster::host { 'annex2.example.com':
- # NOTE: specifying a host uuid is now optional!
- # if you don't choose one, one will be assigned
- #uuid => '2fbe6e2f-f6bc-4c2d-a301-62fa90c459f8',
-}
-
-gluster::brick { 'annex2.example.com:/data/gluster-storage2':
- areyousure => true,
-}
-
-$brick_list = [
- 'annex1.example.com:/data/gluster-storage1',
- 'annex2.example.com:/data/gluster-storage2',
-]
-
-gluster::volume { 'examplevol':
- replica => 2,
- bricks => $brick_list,
- start => undef, # i'll start this myself
-}
-
-# namevar must be: <VOLNAME>#<KEY>
-gluster::volume::property { 'examplevol#auth.reject':
- value => ['192.0.2.13', '198.51.100.42', '203.0.113.69'],
-}
-```
-
-##Usage and frequently asked questions
-
-All management should be done by manipulating the arguments on the appropriate
-Puppet-Gluster classes and types. Since certain manipulations are either not
-yet possible with Puppet-Gluster, or are not supported by GlusterFS, attempting
-to manipulate the Puppet configuration in an unsupported way will result in
-undefined behaviour, and possibly even data loss, however this is unlikely.
-
-###How do I change the replica count?
-
-You must set this before volume creation. This is a limitation of GlusterFS.
-There are certain situations where you can change the replica count by adding
-a multiple of the existing brick count to get this desired effect. These cases
-are not yet supported by Puppet-Gluster. If you want to use Puppet-Gluster
-before and / or after this transition, you can do so, but you'll have to do the
-changes manually.
-
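-If you do make the change manually, it is done with the GlusterFS CLI rather
-than with Puppet. A hedged sketch, going from replica 2 to replica 3 by adding
-one new brick per replica set (the host and path follow the naming used in the
-advanced setup example above and are illustrative only):
-
-```
-# gluster volume add-brick examplevol replica 3 annex3.example.com:/data/gluster-storage3
-```
-
-After the transition, make sure the `replica` parameter in your Puppet-Gluster
-definitions matches the new value.
-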
-###Do I need to use a virtual IP?
-
-Using a virtual IP (VIP) is strongly recommended as a distributed lock manager
-(DLM) and also to provide a highly-available (HA) IP address for your clients
-to connect to. For a more detailed explanation of the reasoning please see:
-
-[https://ttboj.wordpress.com/2012/08/23/how-to-avoid-cluster-race-conditions-or-how-to-implement-a-distributed-lock-manager-in-puppet/](https://ttboj.wordpress.com/2012/08/23/how-to-avoid-cluster-race-conditions-or-how-to-implement-a-distributed-lock-manager-in-puppet/)
-
-Remember that even if you're using a hosted solution (such as AWS) that doesn't
-provide an additional IP address, or you want to avoid using an additional IP,
-and you're okay not having full HA client mounting, you can use an unused
-private RFC1918 IP address as the DLM VIP. Remember that a layer 3 IP can
-co-exist on the same layer 2 network with the layer 3 network that is used by
-your cluster.
-
-###Is it possible to have Puppet-Gluster complete in a single run?
-
-No. This is a limitation of Puppet, and is related to how GlusterFS operates.
-For example, it is not reliably possible to predict which ports a particular
-GlusterFS volume will run on until after the volume is started. As a result,
-this module will initially whitelist connections from GlusterFS host IP
-addresses, and then further restrict this to only allow individual ports once
-this information is known. This is possible in conjunction with the
-[puppet-shorewall](https://github.com/purpleidea/puppet-shorewall) module.
-You should notice that each run should complete without error. If you do see an
-error, it means that either something is wrong with your system and / or
-configuration, or because there is a bug in Puppet-Gluster.
-
-###Can you integrate this with vagrant?
-
-Not until vagrant properly supports libvirt/KVM. I have no desire to use
-VirtualBox for fun.
-
-###Awesome work, but it's missing support for a feature and/or platform!
-
-Since this is an Open Source / Free Software project that I also give away for
-free (as in beer, free as in gratis, free as in libre), I'm unable to provide
-unlimited support. Please consider donating funds, hardware, virtual machines,
-and other resources. For specific needs, you could perhaps sponsor a feature!
-
-###You didn't answer my question, or I have a question!
-
-Contact me through my [technical blog](https://ttboj.wordpress.com/contact/)
-and I'll do my best to help. If you have a good question, please remind me to
-add my answer to this documentation!
-
-##Reference
-Please note that there are a number of undocumented options. For more
-information on these options, please view the source at:
-[https://github.com/purpleidea/puppet-gluster/](https://github.com/purpleidea/puppet-gluster/).
-If you feel that a well used option needs documenting here, please contact me.
-
-###Overview of classes and types
-
-* [gluster::simple](#glustersimple): Simple Puppet-Gluster deployment.
-* [gluster::elastic](#glusterelastic): Under construction.
-* [gluster::server](#glusterserver): Base class for server hosts.
-* [gluster::host](#glusterhost): Host type for each participating host.
-* [gluster::brick](#glusterbrick): Brick type for each defined brick, per host.
-* [gluster::volume](#glustervolume): Volume type for each defined volume.
-* [gluster::volume::property](#glustervolumeproperty): Manages properties for each volume.
-
-###gluster::simple
-This is gluster::simple. It should probably take care of 80% of all use cases.
-It is particularly useful for deploying quick test clusters. It uses a
-finite-state machine (FSM) to decide when the cluster has settled and volume
-creation can begin. For more information on the FSM in Puppet-Gluster see:
-[https://ttboj.wordpress.com/2013/09/28/finite-state-machines-in-puppet/](https://ttboj.wordpress.com/2013/09/28/finite-state-machines-in-puppet/)
-
-####`replica`
-The replica count. Can't be changed automatically after initial deployment.
-
-####`volume`
-The volume name or list of volume names to create.
-
-####`path`
-The valid brick path for each host. Defaults to local file system. If you need
-a different path per host, then Gluster::Simple will not meet your needs.
-
-####`vip`
-The virtual IP address to be used for the cluster distributed lock manager.
-
-####`shorewall`
-Boolean to specify whether puppet-shorewall integration should be used or not.
-
-###gluster::elastic
-Under construction.
-
-###gluster::server
-Main server class for the cluster. Must be included when building the GlusterFS
-cluster manually. Wrapper classes such as [gluster::simple](#glustersimple)
-include this automatically.
-
-####`vip`
-The virtual IP address to be used for the cluster distributed lock manager.
-
-####`shorewall`
-Boolean to specify whether puppet-shorewall integration should be used or not.
-
-###gluster::host
-Main host type for the cluster. Each host participating in the GlusterFS
-cluster must define this type on itself, and on every other host. As a result,
-this is not a singleton like the [gluster::server](#glusterserver) class.
-
-####`ip`
-Specify which IP address this host is using. This defaults to the
-_$::ipaddress_ variable. Be sure to set this manually if you're declaring this
-yourself on each host without using exported resources. If each host thinks the
-other hosts should have the same IP address as itself, then Puppet-Gluster and
-GlusterFS won't work correctly.
-
-####`uuid`
-Universally unique identifier (UUID) for the host. If empty, Puppet-Gluster
-will generate this automatically for the host. You can generate your own
-manually with _uuidgen_, and set them yourself. I found this particularly
-useful for testing, because I would pick easy to recognize UUID's like:
-_aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa_,
-_bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb_, and so on. If you set a UUID manually,
-and Puppet-Gluster has a chance to run, then it will remember your choice, and
-store it locally to be used again if you no longer specify the UUID. This is
-particularly useful for upgrading an existing un-managed GlusterFS installation
-to a Puppet-Gluster managed one, without changing any UUID's.
-
-###gluster::brick
-Main brick type for the cluster. Each brick is an individual storage segment to
-be used on a host. Each host must have at least one brick to participate in the
-cluster, but usually a host will have multiple bricks. A brick can be as simple
-as a file system folder, or it can be a separate file system. Please read the
-official GlusterFS documentation, if you aren't entirely comfortable with the
-concept of a brick.
-
-For most test clusters, and for experimentation, it is easiest to use a
-directory on the root file system. You can even use a _/tmp_ sub folder if you
-don't care about the persistence of your data. For more serious clusters, you
-might want to create separate file systems for your data. On self-hosted iron,
-it is not uncommon to create multiple RAID-6 drive pools, and to then create a
-separate file system per virtual drive. Each file system can then be used as a
-single brick.
-
-So that each volume in GlusterFS has the maximum ability to grow, without
-having to partition storage separately, the bricks in Puppet-Gluster are
-actually folders (on whatever backing store you wish) which then contain
-sub folders-- one for each volume. As a result, all the volumes on a given
-GlusterFS cluster can share the total available storage space. If you wish to
-limit the storage used by each volume, you can setup quotas. Alternatively, you
-can buy more hardware, and elastically grow your GlusterFS volumes, since the
-price per GB will be significantly less than any proprietary storage system.
-The one downside to this brick sharing, is that if you have chosen the brick
-per host count specifically to match your performance requirements, and
-each GlusterFS volume on the same cluster has drastically different brick per
-host performance requirements, then this won't suit your needs. I doubt that
-anyone actually has such requirements, but if you do insist on needing this
-compartmentalization, then you can probably use the Puppet-Gluster grouping
-feature to accomplish this goal. Please let me know about your use-case, and
-be warned that the grouping feature hasn't been extensively tested.
-
-To prove to you that I care about automation, this type offers the ability to
-automatically partition and format your file systems. This means you can plug
-in new iron, boot, provision and configure the entire system automatically.
-Regrettably, I don't have a lot of test hardware to routinely use this feature.
-If you'd like to donate some, I'd be happy to test this thoroughly. Having said
-that, I have used this feature, I consider it to be extremely safe, and it has
-never caused me to lose data. If you're uncertain, feel free to look at the
-code, or avoid using this feature entirely. If you think there's a way to make
-it even safer, then feel free to let me know.
-
-####`dev`
-Block device, such as _/dev/sdc_ or _/dev/disk/by-id/scsi-0123456789abcdef_. By
-default, Puppet-Gluster will assume you're using a folder to store the brick
-data, if you don't specify this parameter.
-
-####`fsuuid`
-File system UUID. This ensures we can distinctly identify a file system. You
-can set this to be used with automatic file system creation, or you can specify
-the file system UUID that you'd like to use.
-
-####`labeltype`
-Only _gpt_ is supported. Other options include _msdos_, but this has never been
-used because of its size limitations.
-
-####`fstype`
-This should be _xfs_ or _ext4_. Using _xfs_ is recommended, but _ext4_ is also
-quite common. This only affects a file system that is getting created by this
-module. If you provision a new machine, with a root file system of _ext4_, and
-the brick you create is a root file system path, then this option does nothing.
-
-####`xfs_inode64`
-Set _inode64_ mount option when using the _xfs_ fstype. Choose _true_ to set.
-
-####`xfs_nobarrier`
-Set _nobarrier_ mount option when using the _xfs_ fstype. Choose _true_ to set.
-
-####`ro`
-Whether the file system should be mounted read only. For emergencies only.
-
-####`force`
-If _true_, this will overwrite any xfs file system it sees. This is useful for
-rebuilding GlusterFS repeatedly and wiping data. There are other safeties in
-place to stop this. In general, you probably don't ever want to touch this.
-
-####`areyousure`
-Do you want to allow Puppet-Gluster to do dangerous things? You have to set
-this to _true_ to allow Puppet-Gluster to _fdisk_ and _mkfs_ your file system.
-
-###gluster::volume
-Main volume type for the cluster. This is where a lot of the magic happens.
-Remember that changing some of these parameters after the volume has been
-created won't work, and you'll experience undefined behaviour. There could be
-FSM based error checking to verify that no changes occur, but it has been left
-out so that this code base can eventually support such changes, and so that the
-user can manually change a parameter if they know that it is safe to do so.
-
-####`bricks`
-List of bricks to use for this volume. If this is left at the default value of
-_true_, then this list is built automatically. The algorithm that determines
-this order does not support all possible situations, and most likely can't
-handle certain corner cases. It is possible to examine the FSM to view the
-selected brick order before it has a chance to create the volume. The volume
-creation script won't run until there is a stable brick list as seen by the FSM
-running on the host that has the DLM. If you specify this list of bricks
-manually, you must choose the order to match your desired volume layout. If you
-aren't sure about how to order the bricks, you should review the GlusterFS
-documentation first.
-
-####`transport`
-Only _tcp_ is supported. Possible values can include _rdma_, but this won't get
-any testing if I don't have access to infiniband hardware. Donations welcome.
-
-####`replica`
-Replica count. Usually you'll want to set this to _2_. Some users choose _3_.
-Other values are seldom seen. A value of _1_ can be used for simply testing a
-distributed setup, when you don't care about your data or high availability. A
-value greater than _4_ is probably wasteful and unnecessary. It might even
-cause performance issues if a synchronous write is waiting on a slow fourth
-server.
-
-####`stripe`
-Stripe count. Thoroughly unsupported and untested option. Not recommended for
-use by GlusterFS.
-
-####`ping`
-Do we want to include ping checks with _fping_?
-
-####`settle`
-Do we want to run settle checks?
-
-####`start`
-Requested state for the volume. Valid values include: _true_ (start), _false_
-(stop), or _undef_ (un-managed start/stop state).
-
-###gluster::volume::property
-Main volume property type for the cluster. This allows you to manage GlusterFS
-volume specific properties. There are a wide range of properties that volumes
-support. For the full list of properties, you should consult the GlusterFS
-documentation, or run the _gluster volume set help_ command. To set a property
-you must use the special name pattern of: _volume_#_key_. The value argument is
-used to set the associated value. It is smart enough to accept values in the
-most logical format for that specific property. Some properties aren't yet
-supported, so please report any problems you have with this functionality.
-Because this feature is an awesome way to _document as code_ the volume
-specific optimizations that you've made, make sure you use this feature even if
-you don't use all the others.
-
-####`value`
-The value to be used for this volume property.
-
-##Examples
-For example configurations, please consult the [examples/](https://github.com/purpleidea/puppet-gluster/tree/master/examples) directory in the git
-source repository. It is available from:
-
-[https://github.com/purpleidea/puppet-gluster/tree/master/examples](https://github.com/purpleidea/puppet-gluster/tree/master/examples)
-
-It is also available from:
-
-[https://forge.gluster.org/puppet-gluster/puppet-gluster/trees/master/examples](https://forge.gluster.org/puppet-gluster/puppet-gluster/trees/master/examples/)
-
-##Limitations
-
-This module has been tested against open source Puppet 3.2.4 and higher.
-
-The module has been tested on:
-
-* CentOS 6.4
-
-It will probably work without incident or without major modification on:
-
-* CentOS 5.x/6.x
-* RHEL 5.x/6.x
-
-It will most likely work with other Puppet versions and on other platforms, but
-testing under other conditions has been light due to lack of resources. It will
-most likely not work on Debian/Ubuntu systems without modification. I would
-really love to add support for these operating systems, but I do not have any
-test resources to do so. Please sponsor this if you'd like to see it happen.
-
-##Development
-
-This is my personal project that I work on in my free time.
-Donations of funding, hardware, virtual machines, and other resources are
-appreciated. Please contact me if you'd like to sponsor a feature, invite me to
-talk/teach or for consulting.
-
-You can follow along [on my technical blog](https://ttboj.wordpress.com/).
-
-##Author
-
-Copyright (C) 2010-2013+ James Shubin
-
-* [github](https://github.com/purpleidea/)
-* [@purpleidea](https://twitter.com/#!/purpleidea)
-* [https://ttboj.wordpress.com/](https://ttboj.wordpress.com/)
-
diff --git a/doc/admin-guide/en-US/markdown/admin_rdma_transport.md b/doc/admin-guide/en-US/markdown/admin_rdma_transport.md
deleted file mode 100644
index 872adb31a08..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_rdma_transport.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Introduction
-
-GlusterFS supports using the RDMA protocol for communication between glusterfs clients and glusterfs bricks.
-GlusterFS clients include the FUSE client, libgfapi clients (Samba and NFS-Ganesha included), the gNFS server and other glusterfs processes that communicate with bricks, such as the self-heal daemon, quotad, and the rebalance process.
-
-NOTE: As of now, only the FUSE client and the gNFS server support RDMA transport.
-
-
-NOTE:
-NFS client to gNFS Server/NFS Ganesha Server communication still happens over tcp.
-CIFS Clients/Windows Clients to Samba Server communication still happens over tcp.
-
-# Setup
-Please refer to this external documentation to set up RDMA on your machines:
-http://pkg-ofed.alioth.debian.org/howto/infiniband-howto.html
-http://people.redhat.com/dledford/infiniband_get_started.html
-
-## Creating Trusted Storage Pool
-All the servers in the Trusted Storage Pool must have RDMA devices if either RDMA or TCP,RDMA volumes are created in the storage pool.
-The peer probe must be performed using the IP/hostname assigned to the RDMA device.
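-
-For example, assuming server2's RDMA interface is reachable under the illustrative hostname server2-ib:
-`# gluster peer probe server2-ib`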
-
-## Ports and Firewall
-The glusterd process will listen on both tcp and rdma if an rdma device is found. The port used for rdma is 24008. Similarly, brick processes will also listen on two ports for a volume created with transport "tcp,rdma".
-
-Make sure you update the firewall to accept packets on these ports.
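-
-For example, with iptables, a rule along the following lines could be used to open the glusterd rdma port (adjust the chain and add the brick port range for your setup; this is only a sketch):
-`# iptables -A INPUT -m state --state NEW -p tcp --dport 24008 -j ACCEPT`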
-
-# Gluster Volume Create
-
-A volume can support one or more transport types for communication between clients and brick processes. Three transport types are supported: tcp, rdma, and tcp,rdma.
-
-Example: To create a distributed volume with four storage servers over InfiniBand:
-
-    # gluster volume create test-volume transport rdma server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4
-    Creation of test-volume has been successful
-    Please start the volume to access data.
-
-# Changing Transport of Volume
-To change the supported transport types of an existing volume, follow this procedure:
-NOTE: This is possible only if the volume was created with IP/hostname assigned to RDMA device.
-
- 1. Unmount the volume on all the clients using the following command:
-`# umount mount-point`
- 2. Stop the volumes using the following command:
-`# gluster volume stop volname`
- 3. Change the transport type.
-For example, to enable both tcp and rdma, execute the following command:
-`# gluster volume set volname config.transport tcp,rdma`
- 4. Mount the volume on all the clients.
-For example, to mount using rdma transport, use the following command:
-`# mount -t glusterfs -o transport=rdma server1:/test-volume /mnt/glusterfs`
-
-NOTE:
-The config.transport option does not have an entry in the help output of the gluster CLI.
-`# gluster vol set help | grep config.transport`
-However, the key is a valid one.
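-
-After changing the transport, you can verify the new setting, for example (the volume name is illustrative):
-`# gluster volume info volname | grep Transport-type`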
-
-# Mounting a Volume using RDMA
-
-You can use the mount option "transport" to specify the transport type that the FUSE client must use to communicate with bricks. If the volume was created with only one transport type, then that becomes the default when no value is specified. In case of a tcp,rdma volume, tcp is the default.
-
-For example, to mount using rdma transport, use the following command:
-`# mount -t glusterfs -o transport=rdma server1:/test-volume /mnt/glusterfs`
-
-# Transport used by auxiliary processes
-All the auxiliary processes, like the self-heal daemon and the rebalance process, use the default transport. In case you have a tcp,rdma volume, they will use tcp.
-In case of an rdma volume, rdma will be used.
-Configuration options to select the transport used by these processes when the volume is tcp,rdma are not yet available and will come in later releases.
-
-
-
diff --git a/doc/admin-guide/en-US/markdown/admin_setting_volumes.md b/doc/admin-guide/en-US/markdown/admin_setting_volumes.md
deleted file mode 100644
index d66a6894152..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_setting_volumes.md
+++ /dev/null
@@ -1,674 +0,0 @@
-#Setting up GlusterFS Server Volumes
-
-A volume is a logical collection of bricks where each brick is an export
-directory on a server in the trusted storage pool. Most of the gluster
-management operations are performed on the volume.
-
-To create a new volume in your storage environment, specify the bricks
-that comprise the volume. After you have created a new volume, you must
-start it before attempting to mount it.
-
-###Formatting and Mounting Bricks
-
-####Creating a Thinly Provisioned Logical Volume
-
-To create a thinly provisioned logical volume, proceed with the following steps:
-
- 1. Create a physical volume (PV) by using the pvcreate command.
- For example:
-
- `pvcreate --dataalignment 1280K /dev/sdb`
-
- Here, /dev/sdb is a storage device.
- Use the correct dataalignment option based on your device.
-
- >**Note**
- >
- >The device name and the alignment value will vary based on the device you are using.
-
- 2. Create a Volume Group (VG) from the PV using the vgcreate command:
-
-    For example:
-
-    `vgcreate --physicalextentsize 128K gfs_vg /dev/sdb`
-
-    It is recommended that only one VG be created from one storage device.
-
- 3. Create a thin-pool using the following commands:
-
- 1. Create an LV to serve as the metadata device using the following command:
-
- `lvcreate -L metadev_sz --name metadata_device_name VOLGROUP`
-
- For example:
-
- `lvcreate -L 16776960K --name gfs_pool_meta gfs_vg`
-
- 2. Create an LV to serve as the data device using the following command:
-
- `lvcreate -L datadev_sz --name thin_pool VOLGROUP`
-
- For example:
-
- `lvcreate -L 536870400K --name gfs_pool gfs_vg`
-
- 3. Create a thin pool from the data LV and the metadata LV using the following command:
-
- `lvconvert --chunksize STRIPE_WIDTH --thinpool VOLGROUP/thin_pool --poolmetadata VOLGROUP/metadata_device_name`
-
- For example:
-
- `lvconvert --chunksize 1280K --thinpool gfs_vg/gfs_pool --poolmetadata gfs_vg/gfs_pool_meta`
-
-    >**Note**
-    >
-    >By default, the newly provisioned chunks in a thin pool are zeroed to prevent data leaking between different block devices. If this protection is not required, zeroing can be disabled to improve performance:
-
-    `lvchange --zero n VOLGROUP/thin_pool`
-
- For example:
-
- `lvchange --zero n gfs_vg/gfs_pool`
-
- 4. Create a thinly provisioned volume from the previously created pool using the lvcreate command:
-
- For example:
-
- `lvcreate -V 1G -T gfs_vg/gfs_pool -n gfs_lv`
-
- It is recommended that only one LV should be created in a thin pool.
-
-Format bricks using the supported XFS configuration, mount the bricks, and verify the bricks are mounted correctly.
-
- 1. Run `# mkfs.xfs -f -i size=512 -n size=8192 -d su=128K,sw=10 DEVICE` to format the bricks to the supported XFS file system format. Here, DEVICE is the thin LV. The inode size is set to 512 bytes to accommodate the extended attributes used by GlusterFS.
-
- 2. Run `# mkdir /mountpoint` to create a directory to link the brick to.
-
- 3. Add an entry in /etc/fstab:
-
-    `/dev/gfs_vg/gfs_lv /mountpoint xfs rw,inode64,noatime,nouuid 1 2`
-
- 4. Run `# mount /mountpoint` to mount the brick.
-
- 5. Run the `df -h` command to verify the brick is successfully mounted:
-
-    `# df -h
-    /dev/gfs_vg/gfs_lv 16G 1.2G 15G 7% /exp1`
-
-- Volumes of the following types can be created in your storage
- environment:
-
-    - **Distributed** - Distributed volumes distribute files throughout
- the bricks in the volume. You can use distributed volumes where
- the requirement is to scale storage and the redundancy is either
- not important or is provided by other hardware/software layers.
-
-    - **Replicated** – Replicated volumes replicate files across bricks
- in the volume. You can use replicated volumes in environments
- where high-availability and high-reliability are critical.
-
-    - **Striped** – Striped volumes stripe data across bricks in the
- volume. For best results, you should use striped volumes only in
- high concurrency environments accessing very large files.
-
- - **Distributed Striped** - Distributed striped volumes stripe data
- across two or more nodes in the cluster. You should use
- distributed striped volumes where the requirement is to scale
- storage and in high concurrency environments accessing very
- large files is critical.
-
- - **Distributed Replicated** - Distributed replicated volumes
-      distribute files across replicated bricks in the volume. You
- can use distributed replicated volumes in environments where the
- requirement is to scale storage and high-reliability is
- critical. Distributed replicated volumes also offer improved
- read performance in most environments.
-
- - **Distributed Striped Replicated** – Distributed striped replicated
-      volumes distribute striped data across replicated bricks in the
- cluster. For best results, you should use distributed striped
- replicated volumes in highly concurrent environments where
- parallel access of very large files and performance is critical.
- In this release, configuration of this volume type is supported
- only for Map Reduce workloads.
-
-    - **Striped Replicated** – Striped replicated volumes stripe data
- across replicated bricks in the cluster. For best results, you
- should use striped replicated volumes in highly concurrent
- environments where there is parallel access of very large files
- and performance is critical. In this release, configuration of
- this volume type is supported only for Map Reduce workloads.
-
- - **Dispersed** - Dispersed volumes are based on erasure codes,
- providing space-efficient protection against disk or server failures.
-      They store an encoded fragment of the original file on each brick in
-      a way that only a subset of the fragments is needed to recover the
-      original file. The number of bricks that can be missing without
-      losing access to data is configured by the administrator at volume
-      creation time.
-
- - **Distributed Dispersed** - Distributed dispersed volumes distribute
-      files across dispersed subvolumes. They have the same advantages as
-      distributed replicated volumes, but use dispersion to store the data
-      on the bricks.
-
-**To create a new volume**
-
-- Create a new volume :
-
-    `# gluster volume create <NEW-VOLNAME> [stripe <COUNT> | replica <COUNT> | disperse <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...`
-
- For example, to create a volume called test-volume consisting of
- server3:/exp3 and server4:/exp4:
-
- # gluster volume create test-volume server3:/exp3 server4:/exp4
- Creation of test-volume has been successful
- Please start the volume to access data.
-
-##Creating Distributed Volumes
-
-In a distributed volume, files are spread randomly across the bricks in
-the volume. Use distributed volumes where you need to scale storage and
-redundancy is either not important or is provided by other
-hardware/software layers.
-
-> **Note**:
-> Disk/server failure in distributed volumes can result in a serious
-> loss of data because directory contents are spread randomly across the
-> bricks in the volume.
-
-![][]
-
-**To create a distributed volume**
-
-1. Create a trusted storage pool.
-
-2. Create the distributed volume:
-
-    `# gluster volume create <NEW-VOLNAME> [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...`
-
- For example, to create a distributed volume with four storage
- servers using tcp:
-
- # gluster volume create test-volume server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4
- Creation of test-volume has been successful
- Please start the volume to access data.
-
- (Optional) You can display the volume information:
-
- # gluster volume info
- Volume Name: test-volume
- Type: Distribute
- Status: Created
- Number of Bricks: 4
- Transport-type: tcp
- Bricks:
- Brick1: server1:/exp1
- Brick2: server2:/exp2
- Brick3: server3:/exp3
- Brick4: server4:/exp4
-
- For example, to create a distributed volume with four storage
- servers over InfiniBand:
-
- # gluster volume create test-volume transport rdma server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4
- Creation of test-volume has been successful
- Please start the volume to access data.
-
- If the transport type is not specified, *tcp* is used as the
- default. You can also set additional options if required, such as
- auth.allow or auth.reject.
-
- > **Note**:
- > Make sure you start your volumes before you try to mount them or
- > else client operations after the mount will hang.
-
-##Creating Replicated Volumes
-
-Replicated volumes create copies of files across multiple bricks in the
-volume. You can use replicated volumes in environments where
-high-availability and high-reliability are critical.
-
-> **Note**:
-> The number of bricks should be equal to the replica count for a
-> replicated volume. To protect against server and disk failures, it is
-> recommended that the bricks of the volume are from different servers.
-
-![][1]
-
-**To create a replicated volume**
-
-1. Create a trusted storage pool.
-
-2. Create the replicated volume:
-
-    `# gluster volume create <NEW-VOLNAME> [replica <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...`
-
- For example, to create a replicated volume with two storage servers:
-
- # gluster volume create test-volume replica 2 transport tcp server1:/exp1 server2:/exp2
- Creation of test-volume has been successful
- Please start the volume to access data.
-
- If the transport type is not specified, *tcp* is used as the
- default. You can also set additional options if required, such as
- auth.allow or auth.reject.
-
- > **Note**:
-
- > - Make sure you start your volumes before you try to mount them or
- > else client operations after the mount will hang.
-
-    > - GlusterFS will fail to create a replicate volume if more than one brick of a replica set is present on the same peer. For example, a four node replicated volume where more than one brick of a replica set is present on the same peer:
- > ```
- # gluster volume create <volname> replica 4 server1:/brick1 server1:/brick2 server2:/brick3 server4:/brick4
- volume create: <volname>: failed: Multiple bricks of a replicate volume are present on the same server. This setup is not optimal. Use 'force' at the end of the command if you want to override this behavior.```
-
- > Use the `force` option at the end of command if you want to create the volume in this case.
-
-###Arbiter configuration for replica volumes
-Arbiter volumes are replica 3 volumes where the 3rd brick acts as the arbiter brick. This configuration has mechanisms that prevent the occurrence of split-brain.
-It can be created with the following command:
-`# gluster volume create <VOLNAME> replica 3 arbiter 1 host1:brick1 host2:brick2 host3:brick3`
-More information about this configuration can be found at `doc/features/afr-arbiter-volumes.md`
-Note that the arbiter configuration for replica 3 can be used to create distributed-replicate volumes as well.
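-
-For example, a 2x(2+1) distributed-replicate arbiter volume could be created from six bricks as follows (host and brick names are only illustrative):
-`# gluster volume create <VOLNAME> replica 3 arbiter 1 host{1..6}:/bricks/brick1`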
-
-##Creating Striped Volumes
-
-Striped volumes stripes data across bricks in the volume. For best
-results, you should use striped volumes only in high concurrency
-environments accessing very large files.
-
-> **Note**:
-> The number of bricks should be equal to the stripe count for a
-> striped volume.
-
-![][2]
-
-**To create a striped volume**
-
-1. Create a trusted storage pool.
-
-2. Create the striped volume:
-
-    `# gluster volume create <NEW-VOLNAME> [stripe <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...`
-
- For example, to create a striped volume across two storage servers:
-
- # gluster volume create test-volume stripe 2 transport tcp server1:/exp1 server2:/exp2
- Creation of test-volume has been successful
- Please start the volume to access data.
-
- If the transport type is not specified, *tcp* is used as the
- default. You can also set additional options if required, such as
- auth.allow or auth.reject.
-
- > **Note**:
- > Make sure you start your volumes before you try to mount them or
- > else client operations after the mount will hang.
-
-##Creating Distributed Striped Volumes
-
-Distributed striped volumes stripe files across two or more nodes in
-the cluster. For best results, you should use distributed striped
-volumes where the requirement is to scale storage and in high
-concurrency environments accessing very large files is critical.
-
-> **Note**:
-> The number of bricks should be a multiple of the stripe count for a
-> distributed striped volume.
-
-![][3]
-
-**To create a distributed striped volume**
-
-1. Create a trusted storage pool.
-
-2. Create the distributed striped volume:
-
-    `# gluster volume create <NEW-VOLNAME> [stripe <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...`
-
- For example, to create a distributed striped volume across eight
- storage servers:
-
- # gluster volume create test-volume stripe 4 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6 server7:/exp7 server8:/exp8
- Creation of test-volume has been successful
- Please start the volume to access data.
-
- If the transport type is not specified, *tcp* is used as the
- default. You can also set additional options if required, such as
- auth.allow or auth.reject.
-
- > **Note**:
- > Make sure you start your volumes before you try to mount them or
- > else client operations after the mount will hang.
-
-##Creating Distributed Replicated Volumes
-
-Distributed replicated volumes distribute files across replicated bricks in the volume. You can use
-distributed replicated volumes in environments where the requirement is
-to scale storage and high-reliability is critical. Distributed
-replicated volumes also offer improved read performance in most
-environments.
-
-> **Note**:
-> The number of bricks should be a multiple of the replica count for a
-> distributed replicated volume. Also, the order in which bricks are
-> specified has a great effect on data protection. Each replica\_count
-> consecutive bricks in the list you give will form a replica set, with
-> all replica sets combined into a volume-wide distribute set. To make
-> sure that replica-set members are not placed on the same node, list
-> the first brick on every server, then the second brick on every server
-> in the same order, and so on.
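-
-For example, with servers A and B each contributing two bricks to a replica 2
-volume, listing the bricks in the order below keeps each replica pair on
-different servers (a sketch; server and brick names are only illustrative):
-
-    # gluster volume create test-volume replica 2 serverA:/exp1 serverB:/exp1 serverA:/exp2 serverB:/exp2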
-
-![][4]
-
-**To create a distributed replicated volume**
-
-1. Create a trusted storage pool.
-
-2. Create the distributed replicated volume:
-
-    `# gluster volume create <NEW-VOLNAME> [replica <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...`
-
- For example, four node distributed (replicated) volume with a
- two-way mirror:
-
- # gluster volume create test-volume replica 2 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4
- Creation of test-volume has been successful
- Please start the volume to access data.
-
- For example, to create a six node distributed (replicated) volume
- with a two-way mirror:
-
- # gluster volume create test-volume replica 2 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6
- Creation of test-volume has been successful
- Please start the volume to access data.
-
- If the transport type is not specified, *tcp* is used as the
- default. You can also set additional options if required, such as
- auth.allow or auth.reject.
-
- > **Note**:
- > - Make sure you start your volumes before you try to mount them or
- > else client operations after the mount will hang.
-
-    > - GlusterFS will fail to create a distribute replicate volume if more than one brick of a replica set is present on the same peer. For example, a four node distributed (replicated) volume where more than one brick of a replica set is present on the same peer:
- > ```
- # gluster volume create <volname> replica 2 server1:/brick1 server1:/brick2 server2:/brick3 server4:/brick4
- volume create: <volname>: failed: Multiple bricks of a replicate volume are present on the same server. This setup is not optimal. Use 'force' at the end of the command if you want to override this behavior.```
-
- > Use the `force` option at the end of command if you want to create the volume in this case.
-
-
-##Creating Distributed Striped Replicated Volumes
-
-Distributed striped replicated volumes distributes striped data across
-replicated bricks in the cluster. For best results, you should use
-distributed striped replicated volumes in highly concurrent environments
-where parallel access of very large files and performance is critical.
-In this release, configuration of this volume type is supported only for
-Map Reduce workloads.
-
-> **Note**:
-> The number of bricks should be a multiple of the stripe count multiplied
-> by the replica count for a distributed striped replicated volume.
-
-**To create a distributed striped replicated volume**
-
-1. Create a trusted storage pool.
-
-2. Create a distributed striped replicated volume using the following
- command:
-
-    `# gluster volume create <NEW-VOLNAME> [stripe <COUNT>] [replica <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...`
-
- For example, to create a distributed replicated striped volume
- across eight storage servers:
-
- # gluster volume create test-volume stripe 2 replica 2 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6 server7:/exp7 server8:/exp8
- Creation of test-volume has been successful
- Please start the volume to access data.
-
- If the transport type is not specified, *tcp* is used as the
- default. You can also set additional options if required, such as
- auth.allow or auth.reject.
-
- > **Note**:
- > - Make sure you start your volumes before you try to mount them or
- > else client operations after the mount will hang.
-
-    > - GlusterFS will fail to create a distribute replicate volume if more than one brick of a replica set is present on the same peer. For example, a four node distributed (replicated) volume where more than one brick of a replica set is present on the same peer:
- > ```
- # gluster volume create <volname> stripe 2 replica 2 server1:/brick1 server1:/brick2 server2:/brick3 server4:/brick4
- volume create: <volname>: failed: Multiple bricks of a replicate volume are present on the same server. This setup is not optimal. Use 'force' at the end of the command if you want to override this behavior.```
-
- > Use the `force` option at the end of command if you want to create the volume in this case.
-
-##Creating Striped Replicated Volumes
-
-Striped replicated volumes stripe data across replicated bricks in the
-cluster. For best results, you should use striped replicated volumes in
-highly concurrent environments where there is parallel access of very
-large files and performance is critical. In this release, configuration
-of this volume type is supported only for Map Reduce workloads.
-
-> **Note**:
-> The number of bricks should be a multiple of the replica count multiplied
-> by the stripe count for a striped replicated volume.
-
-![][5]
-
-**To create a striped replicated volume**
-
-1. Create a trusted storage pool consisting of the storage servers that
- will comprise the volume.
-
-2. Create a striped replicated volume :
-
-    `# gluster volume create <NEW-VOLNAME> [stripe <COUNT>] [replica <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...`
-
- For example, to create a striped replicated volume across four
- storage servers:
-
- # gluster volume create test-volume stripe 2 replica 2 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4
- Creation of test-volume has been successful
- Please start the volume to access data.
-
- To create a striped replicated volume across six storage servers:
-
- # gluster volume create test-volume stripe 3 replica 2 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6
- Creation of test-volume has been successful
- Please start the volume to access data.
-
- If the transport type is not specified, *tcp* is used as the
- default. You can also set additional options if required, such as
- auth.allow or auth.reject.
-
- > **Note**:
- > - Make sure you start your volumes before you try to mount them or
- > else client operations after the mount will hang.
-
-    > - GlusterFS will fail to create a distribute replicate volume if more than one brick of a replica set is present on the same peer. For example, a four node distributed (replicated) volume where more than one brick of a replica set is present on the same peer:
- > ```
- # gluster volume create <volname> stripe 2 replica 2 server1:/brick1 server1:/brick2 server2:/brick3 server4:/brick4
- volume create: <volname>: failed: Multiple bricks of a replicate volume are present on the same server. This setup is not optimal. Use `force` at the end of the command if you want to override this behavior.```
-
- > Use the `force` option at the end of command if you want to create the volume in this case.
-
-##Creating Dispersed Volumes
-
-Dispersed volumes are based on erasure codes. They stripe the encoded data of
-files, with some redundancy added, across multiple bricks in the volume. You
-can use dispersed volumes to have a configurable level of reliability with
-minimal space waste.
-
-**Redundancy**
-
-Each dispersed volume has a redundancy value defined when the volume is
-created. This value determines how many bricks can be lost without
-interrupting the operation of the volume. It also determines the amount of
-usable space of the volume using this formula:
-
- <Usable size> = <Brick size> * (#Bricks - Redundancy)
-
-All bricks of a disperse set should have the same capacity, otherwise, when
-the smallest brick becomes full, no additional data will be allowed in the
-disperse set.
-
-It's important to note that a configuration with 3 bricks and redundancy 1
-will have less usable space (66.7% of the total physical space) than a
-configuration with 10 bricks and redundancy 1 (90%). However, the first one
-will be safer than the second one (roughly, the probability of failure of
-the second configuration is more than 4.5 times bigger than that of the first one).
-
-For example, a dispersed volume composed of 6 bricks of 4TB and a redundancy
-of 2 will be completely operational even with two bricks inaccessible. However,
-a third inaccessible brick will bring the volume down because it won't be
-possible to read or write to it. The usable space of the volume will be equal
-to 16TB.
-
-The implementation of erasure codes in GlusterFS limits the redundancy to a
-value smaller than #Bricks / 2 (or equivalently, redundancy * 2 < #Bricks).
-Having a redundancy equal to half of the number of bricks would be almost
-equivalent to a replica-2 volume, and probably a replicated volume will
-perform better in this case.
-
-**Optimal volumes**
-
-One of the worst things erasure codes have in terms of performance is the
-RMW (Read-Modify-Write) cycle. Erasure codes operate in blocks of a certain
-size and cannot work with smaller ones. This means that if a user issues
-a write of a portion of a file that doesn't fill a full block, it needs to
-read the remaining portion from the current contents of the file, merge them,
-compute the updated encoded block and, finally, write the resulting data.
-
-This adds latency, reducing performance when this happens. Some GlusterFS
-performance xlators can help to reduce or even eliminate this problem for
-some workloads, but it should be taken into account when using dispersed
-volumes for a specific use case.
-
-The current implementation of dispersed volumes uses blocks of a size that depends
-on the number of bricks and redundancy: 512 * (#Bricks - redundancy) bytes.
-This value is also known as the stripe size.
-
-Using combinations of #Bricks/redundancy that give a power of two for the
-stripe size will make the disperse volume perform better in most workloads
-because it's more typical to write information in blocks whose size is a power
-of two (for example databases, virtual machines and many applications).
-
-These combinations are considered *optimal*.
-
-For example, a configuration with 6 bricks and redundancy 2 will have a stripe
-size of 512 * (6 - 2) = 2048 bytes, so it's considered optimal. A configuration
-with 7 bricks and redundancy 2 would have a stripe size of 2560 bytes, needing
-a RMW cycle for many writes (of course this always depends on the use case).
-
-**To create a dispersed volume**
-
-1. Create a trusted storage pool.
-
-2. Create the dispersed volume:
-
-    `# gluster volume create <NEW-VOLNAME> [disperse [<count>]] [redundancy <count>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...`
-
- A dispersed volume can be created by specifying the number of bricks in a
- disperse set, by specifying the number of redundancy bricks, or both.
-
- If *disperse* is not specified, or the _&lt;count&gt;_ is missing, the
- entire volume will be treated as a single disperse set composed by all
- bricks enumerated in the command line.
-
- If *redundancy* is not specified, it is computed automatically to be the
- optimal value. If this value does not exist, it's assumed to be '1' and a
- warning message is shown:
-
- # gluster volume create test-volume disperse 4 server{1..4}:/bricks/test-volume
- There isn't an optimal redundancy value for this configuration. Do you want to create the volume with redundancy 1 ? (y/n)
-
- In all cases where *redundancy* is automatically computed and it's not
- equal to '1', a warning message is displayed:
-
- # gluster volume create test-volume disperse 6 server{1..6}:/bricks/test-volume
- The optimal redundancy for this configuration is 2. Do you want to create the volume with this value ? (y/n)
-
- _redundancy_ must be greater than 0, and the total number of bricks must
- be greater than 2 * _redundancy_. This means that a dispersed volume must
- have a minimum of 3 bricks.
-
- If the transport type is not specified, *tcp* is used as the default. You
- can also set additional options if required, like in the other volume
- types.
-
- > **Note**:
-
- > - Make sure you start your volumes before you try to mount them or
- > else client operations after the mount will hang.
-
- > - GlusterFS will fail to create a dispersed volume if more than one brick of a disperse set is present on the same peer.
-
- > ```
- # gluster volume create <volname> disperse 3 server1:/brick{1..3}
- volume create: <volname>: failed: Multiple bricks of a replicate volume are present on the same server. This setup is not optimal.
- Do you still want to continue creating the volume? (y/n)```
-
- > Use the `force` option at the end of command if you want to create the volume in this case.
-
-##Creating Distributed Dispersed Volumes
-
-Distributed dispersed volumes are the equivalent to distributed replicated
-volumes, but using dispersed subvolumes instead of replicated ones.
-
-**To create a distributed dispersed volume**
-
-1. Create a trusted storage pool.
-
-2. Create the distributed dispersed volume:
-
-    `# gluster volume create <NEW-VOLNAME> disperse <count> [redundancy <count>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...`
-
- To create a distributed dispersed volume, the *disperse* keyword and
-    &lt;count&gt; are mandatory, and the number of bricks specified in the
-    command line must be a multiple of the disperse count.
-
- *redundancy* is exactly the same as in the dispersed volume.
-
- If the transport type is not specified, *tcp* is used as the default. You
- can also set additional options if required, like in the other volume
- types.
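-
-    For example, a distributed dispersed volume made of two disperse sets of
-    three bricks each could be created like this (a sketch; server names are
-    only illustrative):
-
-    # gluster volume create test-volume disperse 3 server{1..6}:/bricks/test-volume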
-
- > **Note**:
-
- > - Make sure you start your volumes before you try to mount them or
- > else client operations after the mount will hang.
-
- > - GlusterFS will fail to create a distributed dispersed volume if more than one brick of a disperse set is present on the same peer.
-
- > ```
- # gluster volume create <volname> disperse 3 server1:/brick{1..6}
- volume create: <volname>: failed: Multiple bricks of a replicate volume are present on the same server. This setup is not optimal.
- Do you still want to continue creating the volume? (y/n)```
-
- > Use the `force` option at the end of command if you want to create the volume in this case.
-
-##Starting Volumes
-
-You must start your volumes before you try to mount them.
-
-**To start a volume**
-
-- Start a volume:
-
-    `# gluster volume start <VOLNAME>`
-
- For example, to start test-volume:
-
- # gluster volume start test-volume
- Starting test-volume has been successful
-
- []: ../images/Distributed_Volume.png
- [1]: ../images/Replicated_Volume.png
- [2]: ../images/Striped_Volume.png
- [3]: ../images/Distributed_Striped_Volume.png
- [4]: ../images/Distributed_Replicated_Volume.png
- [5]: ../images/Striped_Replicated_Volume.png
diff --git a/doc/admin-guide/en-US/markdown/admin_settingup_clients.md b/doc/admin-guide/en-US/markdown/admin_settingup_clients.md
deleted file mode 100644
index 909eca5ae0a..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_settingup_clients.md
+++ /dev/null
@@ -1,600 +0,0 @@
-#Accessing Data - Setting Up GlusterFS Client
-
-You can access gluster volumes in multiple ways. You can use Gluster
-Native Client method for high concurrency, performance and transparent
-failover in GNU/Linux clients. You can also use NFS v3 to access gluster
-volumes. Extensive testing has be done on GNU/Linux clients and NFS
-implementation in other operating system, such as FreeBSD, and Mac OS X,
-as well as Windows 7 (Professional and Up) and Windows Server 2003.
-Other NFS client implementations may work with gluster NFS server.
-
-You can use CIFS to access volumes when using Microsoft Windows as well
-as SAMBA clients. For this access method, Samba packages need to be
-present on the client side.
-
-##Gluster Native Client
-
-The Gluster Native Client is a FUSE-based client running in user space.
-Gluster Native Client is the recommended method for accessing volumes
-when high concurrency and high write performance is required.
-
-This section introduces the Gluster Native Client and explains how to
-install the software on client machines. This section also describes how
-to mount volumes on clients (both manually and automatically) and how to
-verify that the volume has mounted successfully.
-
-###Installing the Gluster Native Client
-
-Before you begin installing the Gluster Native Client, you need to
-verify that the FUSE module is loaded on the client and has access to
-the required modules as follows:
-
-1. Add the FUSE loadable kernel module (LKM) to the Linux kernel:
-
- `# modprobe fuse`
-
-2. Verify that the FUSE module is loaded:
-
- `# dmesg | grep -i fuse `
- `fuse init (API version 7.13)`
-
-### Installing on Red Hat Package Manager (RPM) Distributions
-
-To install Gluster Native Client on RPM distribution-based systems
-
-1. Install required prerequisites on the client using the following
- command:
-
- `$ sudo yum -y install openssh-server wget fuse fuse-libs openib libibverbs`
-
-2. Ensure that TCP and UDP ports 24007 and 24008 are open on all
- Gluster servers. Apart from these ports, you need to open one port
- for each brick starting from port 49152 (instead of 24009 onwards as
- with previous releases). The brick ports assignment scheme is now
- compliant with IANA guidelines. For example: if you have
- five bricks, you need to have ports 49152 to 49156 open.
-
- You can use the following chains with iptables:
-
- `$ sudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24008 -j ACCEPT `
- `$ sudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 49152:49156 -j ACCEPT`
-
- > **Note**
- >
-    > If you already have iptables chains, make sure that the above
- > ACCEPT rules precede the DROP rules. This can be achieved by
- > providing a lower rule number than the DROP rule.
-
-3. Download the latest glusterfs, glusterfs-fuse, and glusterfs-rdma
- RPM files to each client. The glusterfs package contains the Gluster
- Native Client. The glusterfs-fuse package contains the FUSE
- translator required for mounting on client systems and the
- glusterfs-rdma packages contain OpenFabrics verbs RDMA module for
- Infiniband.
-
- You can download the software at [GlusterFS download page][1].
-
-4. Install Gluster Native Client on the client.
-
- `$ sudo rpm -i glusterfs-3.3.0qa30-1.x86_64.rpm `
- `$ sudo rpm -i glusterfs-fuse-3.3.0qa30-1.x86_64.rpm `
- `$ sudo rpm -i glusterfs-rdma-3.3.0qa30-1.x86_64.rpm`
-
- > **Note**
- >
- > The RDMA module is only required when using Infiniband.
-
-### Installing on Debian-based Distributions
-
-To install Gluster Native Client on Debian-based distributions
-
-1. Install OpenSSH Server on each client using the following command:
-
- `$ sudo apt-get install openssh-server vim wget`
-
-2. Download the latest GlusterFS .deb file and checksum to each client.
-
- You can download the software at [GlusterFS download page][1].
-
-3. For each .deb file, get the checksum (using the following command)
- and compare it against the checksum for that file in the md5sum
- file.
-
- `$ md5sum GlusterFS_DEB_file.deb `
-
- The md5sum of the packages is available at: [GlusterFS download page][2]
-
-4. Uninstall GlusterFS v3.1 (or an earlier version) from the client
- using the following command:
-
- `$ sudo dpkg -r glusterfs `
-
-    (Optional) Run `$ sudo dpkg --purge glusterfs` to purge the
- configuration files.
-
-5. Install Gluster Native Client on the client using the following
- command:
-
- `$ sudo dpkg -i GlusterFS_DEB_file `
-
- For example:
-
- `$ sudo dpkg -i glusterfs-3.3.x.deb `
-
-6. Ensure that TCP and UDP ports 24007 and 24008 are open on all
- Gluster servers. Apart from these ports, you need to open one port
- for each brick starting from port 49152 (instead of 24009 onwards as
- with previous releases). The brick ports assignment scheme is now
- compliant with IANA guidelines. For example: if you have
- five bricks, you need to have ports 49152 to 49156 open.
-
- You can use the following chains with iptables:
-
- `$ sudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24008 -j ACCEPT `
- `$ sudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 49152:49156 -j ACCEPT`
-
- > **Note**
- >
-    > If you already have iptables chains, make sure that the above
- > ACCEPT rules precede the DROP rules. This can be achieved by
- > providing a lower rule number than the DROP rule.
-
-### Performing a Source Installation
-
-To build and install Gluster Native Client from the source code
-
-1. Create a new directory using the following commands:
-
- `# mkdir glusterfs `
- `# cd glusterfs`
-
-2. Download the source code.
-
-    You can download the source at the [GlusterFS download page][1].
-
-3. Extract the source code using the following command:
-
- `# tar -xvzf SOURCE-FILE `
-
-4. Run the configuration utility using the following command:
-
- `# ./configure `
-
- GlusterFS configure summary
- ===========================
- FUSE client : yes
- Infiniband verbs : yes
- epoll IO multiplex : yes
- argp-standalone : no
- fusermount : no
- readline : yes
-
- The configuration summary shows the components that will be built
- with Gluster Native Client.
-
-5. Build the Gluster Native Client software using the following
- commands:
-
- `# make `
- `# make install`
-
-6. Verify that the correct version of Gluster Native Client is
- installed, using the following command:
-
-    `# glusterfs --version`
-
-##Mounting Volumes
-
-After installing the Gluster Native Client, you need to mount Gluster
-volumes to access data. There are two methods you can choose:
-
-- [Manually Mounting Volumes](#manual-mount)
-- [Automatically Mounting Volumes](#auto-mount)
-
-> **Note**
->
-> Server names selected during creation of Volumes should be resolvable
-> in the client machine. You can use appropriate /etc/hosts entries or
-> DNS server to resolve server names to IP addresses.
-
-<a name="manual-mount" />
-### Manually Mounting Volumes
-
-- To mount a volume, use the following command:
-
- `# mount -t glusterfs HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR`
-
- For example:
-
- `# mount -t glusterfs server1:/test-volume /mnt/glusterfs`
-
- > **Note**
- >
- > The server specified in the mount command is only used to fetch
- > the gluster configuration volfile describing the volume name.
- > Subsequently, the client will communicate directly with the
- > servers mentioned in the volfile (which might not even include the
- > one used for mount).
- >
- > If you see a usage message like "Usage: mount.glusterfs", mount
- > usually requires you to create a directory to be used as the mount
- > point. Run "mkdir /mnt/glusterfs" before you attempt to run the
- > mount command listed above.
-
-**Mounting Options**
-
-You can specify the following options when using the
-`mount -t glusterfs` command. Note that you need to separate all options
-with commas.
-
-backupvolfile-server=server-name
-
-volfile-max-fetch-attempts=number of attempts
-
-log-level=loglevel
-
-log-file=logfile
-
-transport=transport-type
-
-direct-io-mode=[enable|disable]
-
-use-readdirp=[yes|no]
-
-For example:
-
-`# mount -t glusterfs -o backupvolfile-server=volfile_server2,use-readdirp=no,volfile-max-fetch-attempts=2,log-level=WARNING,log-file=/var/log/gluster.log server1:/test-volume /mnt/glusterfs`
-
-If `backupvolfile-server` option is added while mounting fuse client,
-when the first volfile server fails, then the server specified in
-`backupvolfile-server` option is used as volfile server to mount the
-client.
-
-In `volfile-max-fetch-attempts=X` option, specify the number of
-attempts to fetch volume files while mounting a volume. This option is
-useful when you mount a server with multiple IP addresses or when
-round-robin DNS is configured for the server name.
-
-If `use-readdirp` is set to ON, it forces the use of readdirp
-mode in the FUSE kernel module.
-
-<a name="auto-mount" />
-### Automatically Mounting Volumes
-
-You can configure your system to automatically mount the Gluster volume
-each time your system starts.
-
-The server specified in the mount command is only used to fetch the
-gluster configuration volfile describing the volume name. Subsequently,
-the client will communicate directly with the servers mentioned in the
-volfile (which might not even include the one used for mount).
-
-- To mount a volume, edit the /etc/fstab file and add the following
- line:
-
- `HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR glusterfs defaults,_netdev 0 0 `
-
- For example:
-
- `server1:/test-volume /mnt/glusterfs glusterfs defaults,_netdev 0 0`
-
-**Mounting Options**
-
-You can specify the following options when updating the /etc/fstab file.
-Note that you need to separate all options with commas.
-
-log-level=loglevel
-
-log-file=logfile
-
-transport=transport-type
-
-direct-io-mode=[enable|disable]
-
-use-readdirp=no
-
-For example:
-
-`HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR glusterfs defaults,_netdev,log-level=WARNING,log-file=/var/log/gluster.log 0 0 `
-
-### Testing Mounted Volumes
-
-To test mounted volumes
-
-- Use the following command:
-
- `# mount `
-
- If the gluster volume was successfully mounted, the output of the
- mount command on the client will be similar to this example:
-
-    `server1:/test-volume on /mnt/glusterfs type fuse.glusterfs (rw,allow_other,default_permissions,max_read=131072)`
-
-- Use the following command:
-
- `# df`
-
- The output of df command on the client will display the aggregated
- storage space from all the bricks in a volume similar to this
- example:
-
-      # df -h /mnt/glusterfs
-      Filesystem            Size  Used  Avail  Use%  Mounted on
-      server1:/test-volume   28T   22T  5.4T   82%   /mnt/glusterfs
-
-- Change to the directory and list the contents by entering the
- following:
-
- `# cd MOUNTDIR `
- `# ls`
-
-- For example,
-
- `# cd /mnt/glusterfs `
- `# ls`
-
-#NFS
-
-You can use NFS v3 to access gluster volumes. Extensive testing has
-been done on GNU/Linux clients. NFS implementations in other operating
-systems, such as FreeBSD, Mac OS X, Windows 7 (Professional and Up),
-Windows Server 2003, and others, may work with the gluster NFS server
-implementation.
-
-GlusterFS now includes network lock manager (NLM) v4. NLM enables
-applications on NFSv3 clients to do record locking on files on NFS
-server. It is started automatically whenever the NFS server is run.
-
-You must install the nfs-common package on both servers and clients
-(only for Debian-based distributions).
-
-This section describes how to use NFS to mount Gluster volumes (both
-manually and automatically) and how to verify that the volume has been
-mounted successfully.
-
-##Using NFS to Mount Volumes
-
-You can use either of the following methods to mount Gluster volumes:
-
-- [Manually Mounting Volumes Using NFS](#manual-nfs)
-- [Automatically Mounting Volumes Using NFS](#auto-nfs)
-
-**Prerequisite**: Install nfs-common package on both servers and clients
-(only for Debian-based distribution), using the following command:
-
-`$ sudo aptitude install nfs-common `
-
-<a name="manual-nfs" />
-### Manually Mounting Volumes Using NFS
-
-**To manually mount a Gluster volume using NFS**
-
-- To mount a volume, use the following command:
-
- `# mount -t nfs -o vers=3 HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR`
-
- For example:
-
- `# mount -t nfs -o vers=3 server1:/test-volume /mnt/glusterfs`
-
- > **Note**
- >
- > Gluster NFS server does not support UDP. If the NFS client you are
- > using defaults to connecting using UDP, the following message
- > appears:
- >
- > `requested NFS version or transport protocol is not supported`.
-
- **To connect using TCP**
-
-- Add the following option to the mount command:
-
- `-o mountproto=tcp `
-
- For example:
-
- `# mount -o mountproto=tcp -t nfs server1:/test-volume /mnt/glusterfs`
-
-**To mount Gluster NFS server from a Solaris client**
-
-- Use the following command:
-
- `# mount -o proto=tcp,vers=3 nfs://HOSTNAME-OR-IPADDRESS:38467/VOLNAME MOUNTDIR`
-
- For example:
-
- ` # mount -o proto=tcp,vers=3 nfs://server1:38467/test-volume /mnt/glusterfs`
-
-<a name="auto-nfs" />
-### Automatically Mounting Volumes Using NFS
-
-You can configure your system to automatically mount Gluster volumes
-using NFS each time the system starts.
-
-**To automatically mount a Gluster volume using NFS**
-
-- To mount a volume, edit the /etc/fstab file and add the following
- line:
-
- `HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR nfs defaults,_netdev,vers=3 0 0`
-
- For example,
-
- `server1:/test-volume /mnt/glusterfs nfs defaults,_netdev,vers=3 0 0`
-
- > **Note**
- >
- > Gluster NFS server does not support UDP. If the NFS client you are
- > using defaults to connecting using UDP, the following message
- > appears:
- >
- > `requested NFS version or transport protocol is not supported.`
-
- To connect using TCP
-
-- Add the following entry in /etc/fstab file :
-
- `HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR nfs defaults,_netdev,mountproto=tcp 0 0`
-
- For example,
-
- `server1:/test-volume /mnt/glusterfs nfs defaults,_netdev,mountproto=tcp 0 0`
-
-**To automount NFS mounts**
-
-Gluster supports the standard \*nix method of automounting NFS mounts.
-Update the /etc/auto.master and /etc/auto.misc and restart the autofs
-service. After that, whenever a user or process attempts to access the
-directory it will be mounted in the background.
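-
-A minimal sketch of such a configuration (the mount point, map file, and
-volume name are only illustrative) could look like:
-
-    # /etc/auto.master
-    /mnt/gluster  /etc/auto.misc
-
-    # /etc/auto.misc
-    test-volume  -fstype=nfs,vers=3,mountproto=tcp  server1:/test-volume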
-
-### Testing Volumes Mounted Using NFS
-
-You can confirm that Gluster directories are mounting successfully.
-
-**To test mounted volumes**
-
-- Use the mount command by entering the following:
-
- `# mount`
-
- For example, the output of the mount command on the client will
- display an entry like the following:
-
- `server1:/test-volume on /mnt/glusterfs type nfs (rw,vers=3,addr=server1)`
-
-- Use the df command by entering the following:
-
- `# df`
-
- For example, the output of df command on the client will display the
- aggregated storage space from all the bricks in a volume.
-
- # df -h /mnt/glusterfs
- Filesystem Size Used Avail Use% Mounted on
- server1:/test-volume 28T 22T 5.4T 82% /mnt/glusterfs
-
-- Change to the directory and list the contents by entering the
- following:
-
- `# cd MOUNTDIR`
- `# ls`
-
-#CIFS
-
-You can use CIFS to access volumes when using Microsoft Windows as
-well as SAMBA clients. For this access method, Samba packages need to be
-present on the client side. You can export glusterfs mount point as the
-samba export, and then mount it using CIFS protocol.
-
-This section describes how to mount CIFS shares on Microsoft
-Windows-based clients (both manually and automatically) and how to
-verify that the volume has mounted successfully.
-
-> **Note**
->
-> CIFS access using the Mac OS X Finder is not supported, however, you
-> can use the Mac OS X command line to access Gluster volumes using
-> CIFS.
-
-##Using CIFS to Mount Volumes
-
-You can use either of the following methods to mount Gluster volumes:
-
-- [Exporting Gluster Volumes Through Samba](#export-samba)
-- [Manually Mounting Volumes Using CIFS](#cifs-manual)
-- [Automatically Mounting Volumes Using CIFS](#cifs-auto)
-
-You can also use Samba for exporting Gluster Volumes through CIFS
-protocol.
-
-<a name="export-samba" />
-### Exporting Gluster Volumes Through Samba
-
-We recommend using Samba for exporting Gluster volumes through the
-CIFS protocol.
-
-**To export volumes through CIFS protocol**
-
-1. Mount a Gluster volume.
-
-2. Setup Samba configuration to export the mount point of the Gluster
- volume.
-
- For example, if a Gluster volume is mounted on /mnt/gluster, you
- must edit smb.conf file to enable exporting this through CIFS. Open
- smb.conf file in an editor and add the following lines for a simple
- configuration:
-
- [glustertest]
-
- comment = For testing a Gluster volume exported through CIFS
-
- path = /mnt/glusterfs
-
- read only = no
-
- guest ok = yes
-
-Save the changes and start the smb service using your system's init
-scripts (/etc/init.d/smb [re]start).
-
-> **Note**
->
-> To be able to mount from any server in the trusted storage pool, you must
-> repeat these steps on each Gluster node. For more advanced
-> configurations, see Samba documentation.
-
-<a name="cifs-manual" />
-### Manually Mounting Volumes Using CIFS
-
-You can manually mount Gluster volumes using CIFS on Microsoft
-Windows-based client machines.
-
-**To manually mount a Gluster volume using CIFS**
-
-1. Using Windows Explorer, choose **Tools \> Map Network Drive…** from
-    the menu. The **Map Network Drive** window appears.
-
-2. Choose the drive letter using the **Drive** drop-down list.
-
-3. Click **Browse**, select the volume to map to the network drive, and
- click **OK**.
-
-4. Click **Finish.**
-
-The network drive (mapped to the volume) appears in the Computer window.
-
-Alternatively, you can manually mount a Gluster volume using CIFS by going to
-**Start \> Run** and entering the network path manually.
-
-<a name="cifs-auto" />
-### Automatically Mounting Volumes Using CIFS
-
-You can configure your system to automatically mount Gluster volumes
-using CIFS on Microsoft Windows-based clients each time the system
-starts.
-
-**To automatically mount a Gluster volume using CIFS**
-
-The network drive (mapped to the volume) appears in the Computer window
-and is reconnected each time the system starts.
-
-1. Using Windows Explorer, choose **Tools \> Map Network Drive…** from
-    the menu. The **Map Network Drive** window appears.
-
-2. Choose the drive letter using the **Drive** drop-down list.
-
-3. Click **Browse**, select the volume to map to the network drive, and
- click **OK**.
-
-4. Click the **Reconnect at logon** checkbox.
-
-5. Click **Finish.**
-
-### Testing Volumes Mounted Using CIFS
-
-You can confirm that Gluster directories are mounting successfully by
-navigating to the directory using Windows Explorer.
-
- []: http://bits.gluster.com/gluster/glusterfs/3.3.0qa30/x86_64/
- [1]: http://www.gluster.org/download/
- [2]: http://download.gluster.com/pub/gluster/glusterfs
diff --git a/doc/admin-guide/en-US/markdown/admin_ssl.md b/doc/admin-guide/en-US/markdown/admin_ssl.md
deleted file mode 100644
index 4522bcedf88..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_ssl.md
+++ /dev/null
@@ -1,128 +0,0 @@
-# Setting up GlusterFS with SSL/TLS
-
-GlusterFS allows its communication to be secured using the [Transport Layer
-Security][tls] standard (which supersedes Secure Sockets Layer), using the
-[OpenSSL][ossl] library. Setting this up requires a basic working knowledge of
-some SSL/TLS concepts, which can only be briefly summarized here.
-
- * "Authentication" is the process of one entity (e.g. a machine, process, or
- person) proving its identity to a second entity.
-
- * "Authorization" is the process of checking whether an entity has permission
- to perform an action.
-
- * TLS provides authentication and encryption. It does not provide
- authorization, though GlusterFS can use TLS-authenticated identities to
- authorize client connections to bricks/volumes.
-
- * An entity X which must authenticate to a second entity Y does so by sharing
- with Y a *certificate*, which contains information sufficient to prove X's
- identity. X's proof of identity also requires possession of a *private key*
- which matches its certificate, but this key is never seen by Y or anyone
- else. Because the certificate is already public, anyone who has the key can
- claim that identity.
-
- * Each certificate contains the identity of its principal (owner) along with
- the identity of a *certifying authority* or CA who can verify the integrity
- of the certificate's contents. The principal and CA can be the same (a
- "self-signed certificate"). If they are different, the CA must *sign* the
- certificate by appending information derived from both the certificate
- contents and the CA's own private key.
-
- * Certificate-signing relationships can extend through multiple levels. For
- example, a company X could sign another company Y's certificate, which could
- then be used to sign a third certificate Z for a specific user or purpose.
- Anyone who trusts X (and is willing to extend that trust through a
- *certificate depth* of two or more) would therefore be able to authenticate
- Y and Z as well.
-
- * Any entity willing to accept other entities' authentication attempts must
-   have some sort of database seeded with the certificates that it already accepts.
-
-In GlusterFS's case, a client or server X uses the following files to contain
-TLS-related information:
-
- * /etc/ssl/glusterfs.pem X's own certificate
-
- * /etc/ssl/glusterfs.key X's private key
-
- * /etc/ssl/glusterfs.ca concatenation of *others'* certificates
-
-GlusterFS always performs *mutual authentication*, though clients do not
-currently do anything with the authenticated server identity. Thus, if client X
-wants to communicate with server Y, then X's certificate (or that of a signer)
-must be in Y's CA file, and vice versa.
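-
-As a rough sketch of how these files might be produced on one node (the CN
-"Zaphod", key size, and validity period are only examples; any standard
-OpenSSL workflow that yields a matching key, certificate, and CA file will
-do):
-
-    openssl genrsa -out /etc/ssl/glusterfs.key 2048
-    openssl req -new -x509 -key /etc/ssl/glusterfs.key \
-        -subj "/CN=Zaphod" -days 365 -out /etc/ssl/glusterfs.pem
-    # concatenate the certificates of the peers you trust into the CA file
-    cat server1.pem server2.pem client1.pem > /etc/ssl/glusterfs.ca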
-
-For all uses of TLS in GlusterFS, if one side of a connection is configured to
-use TLS then the other side must use it as well. There is no automatic fallback
-to non-TLS communication, or allowance for concurrent TLS and non-TLS access to
-the same resource, because either would be insecure. Instead, any such "mixed
-mode" connections will be rejected by the TLS-using side, sacrificing
-availability to maintain security.
-
-## Enabling TLS on the I/O Path
-
-To enable authentication and encryption between clients and brick servers, two
-options must be set:
-
- gluster volume set MYVOLUME client.ssl on
- gluster volume set MYVOLUME server.ssl on
-
-Note that the above options affect only the GlusterFS native protocol. Foreign
-protocols such as NFS, SMB, or Swift will not be affected.
-
-## Using TLS Identities for Authorization
-
-Once TLS has been enabled on the I/O path, TLS identities can be used instead of
-IP addresses or plain usernames to control access to specific volumes. For
-example:
-
- gluster volume set MYVOLUME auth.ssl-allow Zaphod
-
-Here, we're allowing the TLS-authenticated identity "Zaphod" to access MYVOLUME.
-This is intentionally identical to the existing "auth.allow" option, except that
-the name is taken from a TLS certificate instead of a command-line string. Note
-that infelicities in the gluster CLI preclude using names that include spaces,
-which would otherwise be allowed.
-
-## Enabling TLS on the Management Path
-
-Management-daemon traffic is not controlled by an option. Instead, it is
-controlled by the presence of a file on each machine:
-
- /var/lib/glusterd/secure-access
-
-Creating this file will cause glusterd connections made from that machine to use
-TLS. Note that clients must also create this file to communicate with a remote
-glusterd while mounting, though it is not needed thereafter.
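-
-For example, on every server (and on any client that mounts volumes from a
-remote glusterd):
-
-    touch /var/lib/glusterd/secure-access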
-
-## Additional Options
-
-The GlusterFS TLS implementation supports two additional options related to TLS
-internals.
-
-The first option allows the user to set the certificate depth, as mentioned
-above.
-
- gluster volume set MYVOLUME ssl.cert-depth 2
-
-Here, we're setting our certificate depth to two, as in the introductory
-example. By default this value is zero, meaning that only certificates which
-are directly specified in the local CA file will be accepted (i.e. no signed
-certificates at all).
-
-The second option allows the user to specify the set of allowed TLS ciphers.
-
- gluster volume set MYVOLUME ssl.cipher-list HIGH:!SSLv2
-
-Cipher lists are negotiated between the two parties to a TLS connection, so
-that both sides' security needs are satisfied. In this example, we're setting
-the initial cipher list to HIGH, representing ciphers that the cryptography
-community still believes to be unbroken. We are also explicitly disallowing
-ciphers specific to SSL version 2. The default is based on this example but
-also excludes CBC-based cipher modes to provide extra mitigation against the
-[POODLE][poo] attack.
-
-[tls]: http://tools.ietf.org/html/rfc5246
-[ossl]: https://www.openssl.org/
-[poo]: http://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2014-3566
diff --git a/doc/admin-guide/en-US/markdown/admin_start_stop_daemon.md b/doc/admin-guide/en-US/markdown/admin_start_stop_daemon.md
deleted file mode 100644
index a47ece8d95b..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_start_stop_daemon.md
+++ /dev/null
@@ -1,58 +0,0 @@
-#Managing the glusterd Service
-
-After installing GlusterFS, you must start the glusterd service. The
-glusterd service serves as the Gluster elastic volume manager,
-overseeing glusterfs processes and co-ordinating dynamic volume
-operations, such as adding and removing volumes across multiple storage
-servers non-disruptively.
-
-This section describes how to start the glusterd service in the
-following ways:
-
-- [Starting and Stopping glusterd Manually](#manual)
-- [Starting glusterd Automatically](#auto)
-
-> **Note**: You must start glusterd on all GlusterFS servers.
-
-<a name="manual" />
-##Starting and Stopping glusterd Manually
-
-This section describes how to start and stop glusterd manually
-
-- To start glusterd manually, enter the following command:
-
- `# /etc/init.d/glusterd start `
-
-- To stop glusterd manually, enter the following command:
-
- `# /etc/init.d/glusterd stop`
-
-<a name="auto" />
-##Starting glusterd Automatically
-
-This section describes how to configure the system to automatically
-start the glusterd service every time the system boots.
-
-###Red Hat and Fedora distros
-
-To configure Red Hat-based systems to automatically start the glusterd
-service every time the system boots, enter the following from the
-command line:
-
-`# chkconfig glusterd on `
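-
-On newer, systemd-based Red Hat and Fedora releases, the equivalent is
-typically (assuming the service unit is named glusterd):
-
-`# systemctl enable glusterd`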
-
-###Debian and derivatives like Ubuntu
-
-To configure Debian-based systems to automatically start the glusterd
-service every time the system boots, enter the following from the
-command line:
-
-`# update-rc.d glusterd defaults`
-
-###Systems Other than Red Hat and Debian
-
-To configure systems other than Red Hat or Debian to automatically start
-the glusterd service every time the system boots, add the following
-entry to the */etc/rc.local* file:
-
-`# echo "glusterd" >> /etc/rc.local `
diff --git a/doc/admin-guide/en-US/markdown/admin_storage_pools.md b/doc/admin-guide/en-US/markdown/admin_storage_pools.md
deleted file mode 100644
index de181f58c18..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_storage_pools.md
+++ /dev/null
@@ -1,91 +0,0 @@
-#Setting up Trusted Storage Pools
-
-Before you can configure a GlusterFS volume, you must create a trusted
-storage pool consisting of the storage servers that provide bricks to the
-volume.
-
-A storage pool is a trusted network of storage servers. When you start
-the first server, the storage pool consists of that server alone. To add
-additional storage servers to the storage pool, you can use the probe
-command from a storage server that is already trusted.
-
-> **Note**: Do not self-probe the first server/localhost from itself.
-
-The GlusterFS service must be running on all storage servers that you
-want to add to the storage pool.
-
-##Adding Servers to Trusted Storage Pool
-
-To create a trusted storage pool, add servers to the trusted storage
-pool as follows.
-
-1. **The servers used to create the storage pool must be resolvable by
- hostname.**
-
- To add a server to the storage pool:
-
-    `# gluster peer probe <server>`
-
- For example, to create a trusted storage pool of four servers, add
- three servers to the storage pool from server1:
-
- # gluster peer probe server2
- Probe successful
-
- # gluster peer probe server3
- Probe successful
-
- # gluster peer probe server4
- Probe successful
-
-2. **Verify the peer status from the first server using the following
- commands:**
-
- # gluster peer status
- Number of Peers: 3
-
- Hostname: server2
- Uuid: 5e987bda-16dd-43c2-835b-08b7d55e94e5
- State: Peer in Cluster (Connected)
-
- Hostname: server3
- Uuid: 1e0ca3aa-9ef7-4f66-8f15-cbc348f29ff7
- State: Peer in Cluster (Connected)
-
- Hostname: server4
- Uuid: 3e0caba-9df7-4f66-8e5d-cbc348f29ff7
- State: Peer in Cluster (Connected)
-
-3. **Assign the hostname to the first server by probing it from another server (not the server used in steps 1 and 2):**
-
- server2# gluster peer probe server1
- Probe successful
-
-4. **Verify the peer status from the same server you used in step 3 using the following
- command:**
-
- server2# gluster peer status
- Number of Peers: 3
-
- Hostname: server1
- Uuid: ceed91d5-e8d1-434d-9d47-63e914c93424
- State: Peer in Cluster (Connected)
-
- Hostname: server3
- Uuid: 1e0ca3aa-9ef7-4f66-8f15-cbc348f29ff7
- State: Peer in Cluster (Connected)
-
- Hostname: server4
- Uuid: 3e0caba-9df7-4f66-8e5d-cbc348f29ff7
- State: Peer in Cluster (Connected)
-
-##Removing Servers from the Trusted Storage Pool
-
-To remove a server from the storage pool:
-
-`# gluster peer detach <server>`
-
-For example, to remove server4 from the trusted storage pool:
-
- # gluster peer detach server4
- Detach successful
diff --git a/doc/admin-guide/en-US/markdown/admin_troubleshooting.md b/doc/admin-guide/en-US/markdown/admin_troubleshooting.md
deleted file mode 100644
index fa19a2f71de..00000000000
--- a/doc/admin-guide/en-US/markdown/admin_troubleshooting.md
+++ /dev/null
@@ -1,495 +0,0 @@
-#Troubleshooting GlusterFS
-
-This section describes how to manage GlusterFS logs and most common
-troubleshooting scenarios related to GlusterFS.
-
-##Contents
-* [Managing GlusterFS Logs](#logs)
-* [Troubleshooting Geo-replication](#georep)
-* [Troubleshooting POSIX ACLs](#posix-acls)
-* [Troubleshooting Hadoop Compatible Storage](#hadoop)
-* [Troubleshooting NFS](#nfs)
-* [Troubleshooting File Locks](#file-locks)
-
-<a name="logs" />
-##Managing GlusterFS Logs
-
-###Rotating Logs
-
-Administrators can rotate the log file in a volume, as needed.
-
-**To rotate a log file**
-
-    `# gluster volume log rotate <VOLNAME>`
-
-For example, to rotate the log file on test-volume:
-
- # gluster volume log rotate test-volume
- log rotate successful
-
-> **Note**
-> When a log file is rotated, the contents of the current log file
-> are moved to log-file-name.epoch-time-stamp.
-
-<a name="georep" />
-##Troubleshooting Geo-replication
-
-This section describes the most common troubleshooting scenarios related
-to GlusterFS Geo-replication.
-
-###Locating Log Files
-
-Every Geo-replication session has the following three log files
-associated with it (four, if the slave is a gluster volume):
-
-- **Master-log-file** - log file for the process which monitors the Master
-  volume
-- **Slave-log-file** - log file for the process which initiates the changes on
-  the slave
-- **Master-gluster-log-file** - log file for the maintenance mount point
-  that the Geo-replication module uses to monitor the master volume
-- **Slave-gluster-log-file** - the slave's counterpart of the
-  Master-gluster-log-file
-
-**Master Log File**
-
-To get the Master-log-file for geo-replication, use the following
-command:
-
-`# gluster volume geo-replication <MASTER> <SLAVE> config log-file`
-
-For example:
-
-`# gluster volume geo-replication Volume1 example.com:/data/remote_dir config log-file `
-
-**Slave Log File**
-
-To get the log file for Geo-replication on slave (glusterd must be
-running on slave machine), use the following commands:
-
-1. On master, run the following command:
-
- `# gluster volume geo-replication Volume1 example.com:/data/remote_dir config session-owner 5f6e5200-756f-11e0-a1f0-0800200c9a66 `
-
- Displays the session owner details.
-
-2. On slave, run the following command:
-
- `# gluster volume geo-replication /data/remote_dir config log-file /var/log/gluster/${session-owner}:remote-mirror.log `
-
-3. Substitute the session owner details (output of Step 1) into the path
-   from Step 2 to get the location of the log file.
-
- `/var/log/gluster/5f6e5200-756f-11e0-a1f0-0800200c9a66:remote-mirror.log`
-
-###Rotating Geo-replication Logs
-
-Administrators can rotate the log file of a particular master-slave
-session, as needed. When you run geo-replication's `log-rotate`
-command, the log file is backed up with the current timestamp suffixed
-to the file name and a signal is sent to gsyncd to start logging to a
-new log file.
-
-**To rotate a geo-replication log file**
-
-- Rotate log file for a particular master-slave session using the
- following command:
-
-    `# gluster volume geo-replication <MASTER> <SLAVE> log-rotate`
-
- For example, to rotate the log file of master `Volume1` and slave
- `example.com:/data/remote_dir` :
-
- # gluster volume geo-replication Volume1 example.com:/data/remote_dir log rotate
- log rotate successful
-
-- Rotate log file for all sessions for a master volume using the
- following command:
-
-    `# gluster volume geo-replication <MASTER> log-rotate`
-
- For example, to rotate the log file of master `Volume1`:
-
- # gluster volume geo-replication Volume1 log rotate
- log rotate successful
-
-- Rotate log file for all sessions using the following command:
-
- `# gluster volume geo-replication log-rotate`
-
- For example, to rotate the log file for all sessions:
-
- # gluster volume geo-replication log rotate
- log rotate successful
-
-###Synchronization is not complete
-
-**Description**: GlusterFS Geo-replication did not synchronize the data
-completely, but the geo-replication status is still displayed as OK.
-
-**Solution**: You can enforce a full sync of the data by erasing the
-index and restarting GlusterFS Geo-replication. After restarting,
-GlusterFS Geo-replication begins synchronizing all the data. All files
-are compared using checksums, which can be a lengthy and resource-intensive
-operation on large data sets.
-
-
-###Issues in Data Synchronization
-
-**Description**: Geo-replication displays the status as OK, but the files do
-not get synced; only directories and symlinks get synced, with the
-following error message in the log:
-
-    [2011-05-02 13:42:13.467644] E [master:288:regjob] GMaster: failed to
-    sync ./some_file
-
-**Solution**: Geo-replication invokes rsync v3.0.0 or higher on the host
-and the remote machine. You must verify if you have installed the
-required version.
-
-###Geo-replication status displays Faulty very often
-
-**Description**: Geo-replication displays status as faulty very often
-with a backtrace similar to the following:
-
-    [2011-04-28 14:06:18.378859] E [syncdutils:131:log_raise_exception]
-    <top>: FAIL: Traceback (most recent call last):
-      File "/usr/local/libexec/glusterfs/python/syncdaemon/syncdutils.py",
-        line 152, in twrap
-        tf(*aa)
-      File "/usr/local/libexec/glusterfs/python/syncdaemon/repce.py",
-        line 118, in listen
-        rid, exc, res = recv(self.inf)
-      File "/usr/local/libexec/glusterfs/python/syncdaemon/repce.py",
-        line 42, in recv
-        return pickle.load(inf)
-    EOFError
-
-**Solution**: This error indicates that the RPC communication between
-the master gsyncd module and the slave gsyncd module is broken, which can
-happen for various reasons. Check whether all of the following
-prerequisites are satisfied:
-
-- Password-less SSH is set up properly between the host and the remote
-  machine.
-- FUSE is installed on the machine, because the geo-replication module
-  mounts the GlusterFS volume using FUSE to sync data.
-- If the **Slave** is a volume, check that the volume is started.
-- If the Slave is a plain directory, verify that the directory has been
-  created already with the required permissions.
-- If GlusterFS 3.2 or higher is not installed in the default location
- (in Master) and has been prefixed to be installed in a custom
- location, configure the `gluster-command` for it to point to the
- exact location.
-- If GlusterFS 3.2 or higher is not installed in the default location
- (in slave) and has been prefixed to be installed in a custom
- location, configure the `remote-gsyncd-command` for it to point to
- the exact place where gsyncd is located.
-
-###Intermediate Master goes to Faulty State
-
-**Description**: In a cascading set-up, the intermediate master goes to
-faulty state with the following log:
-
-    raise RuntimeError("aborting on uuid change from %s to %s" % \
-    RuntimeError: aborting on uuid change from
-    af07e07c-427f-4586-ab9f-4bf7d299be81 to
-    de6b5040-8f4e-4575-8831-c4f55bd41154
-
-**Solution**: In a cascading set-up, the intermediate master is loyal to
-the original primary master. The above log means that the
-geo-replication module has detected a change in the primary master. If
-this is the desired behavior, delete the config option volume-id in the
-session initiated from the intermediate master.
-
-<a name="posix-acls" />
-##Troubleshooting POSIX ACLs
-
-This section describes the most common troubleshooting issues related to
-POSIX ACLs.
-
-###setfacl command fails with “setfacl: \<file or directory name\>: Operation not supported” error
-
-You may face this error when the backend file system on one of the
-servers is not mounted with the "-o acl" option. This can be
-confirmed by the following error message in that server's log file:
-"Posix access control list is not supported".
-
-**Solution**: Remount the backend file system with the "-o acl" option.
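-
-For example (the device and mount point below are illustrative):
-
-    # example only: device and brick mount point are illustrative
-    mount -o remount,acl /dev/sdb1 /export/brick1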
-
-<a name="hadoop" />
-##Troubleshooting Hadoop Compatible Storage
-
-###Time Sync
-
-**Problem**: Running a MapReduce job may throw exceptions if the time is
-out of sync on the hosts in the cluster.
-
-**Solution**: Sync the time on all hosts using the ntpd program.
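-
-For example, on distributions that ship an ntpd init script:
-
-    # service name is an assumption; use chrony or systemd-timesyncd where applicable
-    service ntpd start
-    chkconfig ntpd on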
-
-<a name="nfs" />
-##Troubleshooting NFS
-
-This section describes the most common troubleshooting issues related to
-NFS.
-
-###mount command on NFS client fails with “RPC Error: Program not registered”
-
-**Start the portmap or rpcbind service on the NFS server**
-
-This error is encountered when the server has not started correctly.
-On most Linux distributions this is fixed by starting portmap:
-
-`$ /etc/init.d/portmap start`
-
-On some distributions where portmap has been replaced by rpcbind, the
-following command is required:
-
-`$ /etc/init.d/rpcbind start `
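-
-On systemd-based distributions, the equivalent is typically (assuming the
-service unit is named rpcbind):
-
-`$ systemctl start rpcbind`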
-
-After starting portmap or rpcbind, gluster NFS server needs to be
-restarted.
-
-###NFS server start-up fails with “Port is already in use” error in the log file.
-
-Another Gluster NFS server is running on the same machine.
-
-This error can arise in case there is already a Gluster NFS server
-running on the same machine. This situation can be confirmed from the
-log file, if the following error lines exist:
-
- [2010-05-26 23:40:49] E [rpc-socket.c:126:rpcsvc_socket_listen] rpc-socket: binding socket failed:Address already in use
- [2010-05-26 23:40:49] E [rpc-socket.c:129:rpcsvc_socket_listen] rpc-socket: Port is already in use
- [2010-05-26 23:40:49] E [rpcsvc.c:2636:rpcsvc_stage_program_register] rpc-service: could not create listening connection
- [2010-05-26 23:40:49] E [rpcsvc.c:2675:rpcsvc_program_register] rpc-service: stage registration of program failed
- [2010-05-26 23:40:49] E [rpcsvc.c:2695:rpcsvc_program_register] rpc-service: Program registration failed: MOUNT3, Num: 100005, Ver: 3, Port: 38465
- [2010-05-26 23:40:49] E [nfs.c:125:nfs_init_versions] nfs: Program init failed
- [2010-05-26 23:40:49] C [nfs.c:531:notify] nfs: Failed to initialize protocols
-
-To resolve this error, one of the Gluster NFS servers will have to be
-shut down. At this time, Gluster NFS does not support running
-multiple NFS servers on the same machine.
-
-###mount command fails with “rpc.statd” related error message
-
-If the mount command fails with the following error message:
-
- mount.nfs: rpc.statd is not running but is required for remote locking.
- mount.nfs: Either use '-o nolock' to keep locks local, or start statd.
-
-For NFS clients to mount the NFS server, rpc.statd service must be
-running on the clients. Start rpc.statd service by running the following command:
-
-`$ rpc.statd `
-
-###mount command takes too long to finish.
-
-**Start rpcbind service on the NFS client**
-
-The problem is that the rpcbind or portmap service is not running on the
-NFS client. The resolution for this is to start either of these services
-by running the following command:
-
-`$ /etc/init.d/portmap start`
-
-On some distributions where portmap has been replaced by rpcbind, the
-following command is required:
-
-`$ /etc/init.d/rpcbind start`
-
-###NFS server glusterfsd starts but initialization fails with “nfsrpc-service: portmap registration of program failed” error message in the log.
-
-NFS start-up can succeed but the initialization of the NFS service can
-still fail preventing clients from accessing the mount points. Such a
-situation can be confirmed from the following error messages in the log
-file:
-
- [2010-05-26 23:33:47] E [rpcsvc.c:2598:rpcsvc_program_register_portmap] rpc-service: Could notregister with portmap
- [2010-05-26 23:33:47] E [rpcsvc.c:2682:rpcsvc_program_register] rpc-service: portmap registration of program failed
- [2010-05-26 23:33:47] E [rpcsvc.c:2695:rpcsvc_program_register] rpc-service: Program registration failed: MOUNT3, Num: 100005, Ver: 3, Port: 38465
- [2010-05-26 23:33:47] E [nfs.c:125:nfs_init_versions] nfs: Program init failed
- [2010-05-26 23:33:47] C [nfs.c:531:notify] nfs: Failed to initialize protocols
- [2010-05-26 23:33:49] E [rpcsvc.c:2614:rpcsvc_program_unregister_portmap] rpc-service: Could not unregister with portmap
- [2010-05-26 23:33:49] E [rpcsvc.c:2731:rpcsvc_program_unregister] rpc-service: portmap unregistration of program failed
- [2010-05-26 23:33:49] E [rpcsvc.c:2744:rpcsvc_program_unregister] rpc-service: Program unregistration failed: MOUNT3, Num: 100005, Ver: 3, Port: 38465
-
-1. **Start portmap or rpcbind service on the NFS server**
-
- On most Linux distributions, portmap can be started using the
- following command:
-
- `$ /etc/init.d/portmap start `
-
- On some distributions where portmap has been replaced by rpcbind,
- run the following command:
-
- `$ /etc/init.d/rpcbind start `
-
- After starting portmap or rpcbind, gluster NFS server needs to be
- restarted.
-
-2. **Stop another NFS server running on the same machine**
-
- Such an error is also seen when there is another NFS server running
- on the same machine but it is not the Gluster NFS server. On Linux
- systems, this could be the kernel NFS server. Resolution involves
- stopping the other NFS server or not running the Gluster NFS server
- on the machine. Before stopping the kernel NFS server, ensure that
- no critical service depends on access to that NFS server's exports.
-
- On Linux, kernel NFS servers can be stopped by using either of the
- following commands depending on the distribution in use:
-
- `$ /etc/init.d/nfs-kernel-server stop`
-
- `$ /etc/init.d/nfs stop`
-
-3. **Restart Gluster NFS server**
-
-###mount command fails with NFS server failed error.
-
-mount command fails with following error
-
-    mount: mount to NFS server '10.1.10.11' failed: timed out (retrying).
-
-Perform one of the following to resolve this issue:
-
-1. **Disable name lookup requests from NFS server to a DNS server**
-
-    The NFS server attempts to authenticate NFS clients by performing a
-    reverse DNS lookup to match hostnames in the volume file with the
-    client IP addresses. There can be a situation where the NFS server
-    either is not able to connect to the DNS server or the DNS server is
-    taking too long to respond to DNS requests. These delays can result
-    in delayed replies from the NFS server to the NFS client, resulting
-    in the timeout error seen above.
-
-    The NFS server provides a workaround that disables DNS requests and
-    instead relies only on the client IP addresses for authentication.
-    The following option can be added for successful mounting in such
-    situations:
-
- `option rpc-auth.addr.namelookup off `
-
-    > **Note**: Remember that disabling name lookup forces the NFS server
-    > to authenticate clients using only IP addresses, and if the
-    > authentication rules in the volume file use hostnames, those rules
-    > will fail and disallow mounting for those clients.
-
- **OR**
-
-2. **NFS version used by the NFS client is other than version 3**
-
- Gluster NFS server supports version 3 of NFS protocol. In recent
- Linux kernels, the default NFS version has been changed from 3 to 4.
- It is possible that the client machine is unable to connect to the
- Gluster NFS server because it is using version 4 messages which are
- not understood by Gluster NFS server. The timeout can be resolved by
- forcing the NFS client to use version 3. The **vers** option to
- mount command is used for this purpose:
-
-    `$ mount -o vers=3 <hostname>:/<volname> <mount-path>`
-
-###showmount fails with clnt\_create: RPC: Unable to receive
-
-Check your firewall settings to open port 111 for portmap
-requests/replies and the Gluster NFS server requests/replies. The Gluster
-NFS server operates over the following port numbers: 38465, 38466, and
-38467.
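-
-For example, with iptables (the rule form below is illustrative; adapt it to
-your firewall and persist the rules as appropriate for your distribution):
-
-    # rule form is illustrative; adapt to your firewall and persist as needed
-    iptables -A INPUT -p tcp --dport 111 -j ACCEPT
-    iptables -A INPUT -p udp --dport 111 -j ACCEPT
-    iptables -A INPUT -p tcp --dport 38465:38467 -j ACCEPT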
-
-###Application fails with "Invalid argument" or "Value too large for defined data type" error.
-
-These two errors generally happen for 32-bit NFS clients, or applications
-that do not support 64-bit inode numbers or large files. Use the
-following option from the CLI to make Gluster NFS return 32-bit inode
-numbers instead: `nfs.enable-ino32 <on|off>`
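-
-For example, the option is typically applied with the volume `set` command:
-
-    # volume name is illustrative
-    gluster volume set test-volume nfs.enable-ino32 on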
-
-Applications that will benefit are those that were either:
-
-- built 32-bit and run on 32-bit machines such that they do not
- support large files by default
-- built 32-bit on 64-bit systems
-
-This option is disabled by default so NFS returns 64-bit inode numbers
-by default.
-
-Applications which can be rebuilt from source are recommended to rebuild
-using the following flag with gcc:
-
-` -D_FILE_OFFSET_BITS=64`
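-
-For example (the file names are illustrative):
-
-`$ gcc -D_FILE_OFFSET_BITS=64 -o myapp myapp.c`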
-
-<a name="file-locks" />
-##Troubleshooting File Locks
-
-In GlusterFS 3.3 you can use the `statedump` command to list the locks held
-on files. The statedump output also provides information on each lock
-with its range, basename, PID of the application holding the lock, and
-so on. You can analyze the output to find locks whose
-owner/application is no longer running or no longer interested in the lock.
-After ensuring that no application is using the file, you can clear the
-lock using the following `clear-locks` commands.
-
-1. **Perform statedump on the volume to view the files that are locked
- using the following command:**
-
-    `# gluster volume statedump <VOLNAME> inode`
-
- For example, to display statedump of test-volume:
-
- # gluster volume statedump test-volume
- Volume statedump successful
-
- The statedump files are created on the brick servers in the` /tmp`
- directory or in the directory set using `server.statedump-path`
- volume option. The naming convention of the dump file is
- `<brick-path>.<brick-pid>.dump`.
-
- The following are the sample contents of the statedump file. It
- indicates that GlusterFS has entered into a state where there is an
- entry lock (entrylk) and an inode lock (inodelk). Ensure that those
- are stale locks and no resources own them.
-
- [xlator.features.locks.vol-locks.inode]
- path=/
- mandatory=0
- entrylk-count=1
- lock-dump.domain.domain=vol-replicate-0
- xlator.feature.locks.lock-dump.domain.entrylk.entrylk[0](ACTIVE)=type=ENTRYLK_WRLCK on basename=file1, pid = 714782904, owner=ffffff2a3c7f0000, transport=0x20e0670, , granted at Mon Feb 27 16:01:01 2012
-
- conn.2.bound_xl./gfs/brick1.hashsize=14057
- conn.2.bound_xl./gfs/brick1.name=/gfs/brick1/inode
- conn.2.bound_xl./gfs/brick1.lru_limit=16384
- conn.2.bound_xl./gfs/brick1.active_size=2
- conn.2.bound_xl./gfs/brick1.lru_size=0
- conn.2.bound_xl./gfs/brick1.purge_size=0
-
- [conn.2.bound_xl./gfs/brick1.active.1]
- gfid=538a3d4a-01b0-4d03-9dc9-843cd8704d07
- nlookup=1
- ref=2
- ia_type=1
- [xlator.features.locks.vol-locks.inode]
- path=/file1
- mandatory=0
- inodelk-count=1
- lock-dump.domain.domain=vol-replicate-0
- inodelk.inodelk[0](ACTIVE)=type=WRITE, whence=0, start=0, len=0, pid = 714787072, owner=00ffff2a3c7f0000, transport=0x20e0670, , granted at Mon Feb 27 16:01:01 2012
-
-2. **Clear the lock using the following command:**
-
-    `# gluster volume clear-locks <VOLNAME> <path> kind granted entry <basename>`
-
- For example, to clear the entry lock on `file1` of test-volume:
-
- # gluster volume clear-locks test-volume / kind granted entry file1
- Volume clear-locks successful
- vol-locks: entry blocked locks=0 granted locks=1
-
-3. **Clear the inode lock using the following command:**
-
-    `# gluster volume clear-locks <VOLNAME> <path> kind granted inode <range>`
-
- For example, to clear the inode lock on `file1` of test-volume:
-
- # gluster volume clear-locks test-volume /file1 kind granted inode 0,0-0
- Volume clear-locks successful
- vol-locks: inode blocked locks=0 granted locks=1
-
- You can perform statedump on test-volume again to verify that the
- above inode and entry locks are cleared.
-
-
diff --git a/doc/admin-guide/en-US/markdown/did-you-know.md b/doc/admin-guide/en-US/markdown/did-you-know.md
deleted file mode 100644
index 085b4a81a7a..00000000000
--- a/doc/admin-guide/en-US/markdown/did-you-know.md
+++ /dev/null
@@ -1,36 +0,0 @@
-#Did you know?
-
-This document is an attempt to describe less-documented behaviours and features
-of GlusterFS that an admin always wanted to know but was too shy or busy to
-ask.
-
-## Trusted Volfiles
-
-Observant admins would have wondered why there are two similar volume files for
-every volume, namely trusted-<VOLNAME>-fuse.vol and <VOLNAME>-fuse.vol. To
-appreciate this one needs to know about the IP address/hostname based access
-restriction schemes available in GlusterFS. They are "auth-allow" and
-"auth-reject". The "auth-allow" and "auth-reject" options take a comma
-separated list of IP addresses/hostnames as value. "auth-allow" allows access
-_only_ to clients running on machines whose IP address/hostname are on this
-list. It is highly likely for an admin to configure the "auth-allow" option
-without including the list of nodes in the cluster. One would expect this to
-work. Previously, in this configuration (internal) clients such as
-gluster-nfs, glustershd etc., running in the trusted storage pool, would be
-denied access to the volume. This is undesirable and counter-intuitive. The
-work around was to add the IP address/hostnames of all the nodes in the trusted
-storage pool to the "auth-allow" list. This is bad for a reasonably large
-number of nodes. To fix this, an alternate authentication mechanism for nodes
-in the storage pool was introduced. Following is a brief explanation of how
-this works.
-
-The volume file with trusted prefix in its name (i.e trusted-volfile) has a
-username and password option in the client xlator. The trusted-volfile is used
-_only_ by mount processes running in the trusted storage pool (hence the name).
-The username and password, when present, allow "mount" (and other glusterfs)
-processes to access the brick processes even if the node they are running on is
-not explicitly added to the "auth-allow" addresses. 'Regular' mount processes,
-running on nodes outside the trusted storage pool, use the non-trusted volfile.
-The important thing to note is that "trusted" in this context only implies
-belonging to the trusted storage pool.
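-
-For illustration, an "auth-allow" restriction of this kind is typically set
-with the volume set command (the volume name and addresses below are
-examples):
-
-    # volume name and addresses are examples
-    gluster volume set myvolume auth.allow 192.168.1.10,192.168.1.11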
-
diff --git a/doc/admin-guide/en-US/markdown/glossary.md b/doc/admin-guide/en-US/markdown/glossary.md
deleted file mode 100644
index 496d0a428d4..00000000000
--- a/doc/admin-guide/en-US/markdown/glossary.md
+++ /dev/null
@@ -1,300 +0,0 @@
-Glossary
-========
-
-**Brick**
-: A Brick is the basic unit of storage in GlusterFS, represented by an export
- directory on a server in the trusted storage pool.
- A brick is expressed by combining a server with an export directory in the following format:
-
- `SERVER:EXPORT`
- For example:
- `myhostname:/exports/myexportdir/`
-
-**Volume**
-: A volume is a logical collection of bricks. Most of the gluster
- management operations happen on the volume.
-
-
-**Subvolume**
-: A brick after being processed by at least one translator, or in other words, a
-  set of one or more xlators stacked together, is called a sub-volume.
-
-
-**Volfile**
-: Volume (vol) files are configuration files that determine the behavior of the
-  GlusterFS trusted storage pool. A volume file is a textual representation of a
-  collection of modules (also known as translators) that together implement the
-  various functions required. The collection of modules is arranged in a graph-like
-  fashion. E.g., a replicated volume's volfile, among other things, would have a
-  section describing the replication translator and its tunables.
-  This section describes how the volume would replicate data written to it.
-  Further, a client process that serves a mount point would interpret its volfile
-  and load the translators described in it. While serving I/O, it would pass the
-  request to the collection of modules in the order specified in the volfile.
-
-  At a high level, GlusterFS has three entities, that is, Server, Client and Management daemon.
- Each of these entities have their own volume files.
- Volume files for servers and clients are generated by the management daemon
- after the volume is created.
-
- Server and Client Vol files are located in /var/lib/glusterd/vols/VOLNAME directory.
- The management daemon vol file is named as glusterd.vol and is located in /etc/glusterfs/
- directory.
-
-**glusterd**
-: The daemon/service that manages volumes and cluster membership. It is required to
- run on all the servers in the trusted storage pool.
-
-**Cluster**
-: A trusted pool of linked computers working together, resembling a single computing resource.
-  In GlusterFS, a cluster is also referred to as a trusted storage pool.
-
-**Client**
-: Any machine that mounts a GlusterFS volume. Any applications that use libgfapi access
- mechanism can also be treated as clients in GlusterFS context.
-
-
-**Server**
-: The machine (virtual or bare metal) that hosts the bricks in which data is stored.
-
-
-**Block Storage**
-: Block special files, or block devices, correspond to devices through which the system moves
- data in the form of blocks. These device nodes often represent addressable devices such as
- hard disks, CD-ROM drives, or memory regions. GlusterFS requires a filesystem (like XFS) that
- supports extended attributes.
-
-
-
-**Filesystem**
-: A method of storing and organizing computer files and their data.
- Essentially, it organizes these files into a database for the
- storage, organization, manipulation, and retrieval by the computer's
- operating system.
-
- Source: [Wikipedia][]
-
-**Distributed File System**
-: A file system that allows multiple clients to concurrently access data which is spread across
- servers/bricks in a trusted storage pool. Data sharing among multiple locations is fundamental
- to all distributed file systems.
-
-**Virtual File System (VFS)**
-: VFS is a kernel software layer which handles all system calls related to the standard Linux file system.
-  It provides a common interface to several kinds of file systems.
-
-**POSIX**
-: Portable Operating System Interface (for Unix) is the name of a
- family of related standards specified by the IEEE to define the
- application programming interface (API), along with shell and
- utilities interfaces for software compatible with variants of the
- Unix operating system. Gluster exports a fully POSIX compliant file
- system.
-
-**Extended Attributes**
-: Extended file attributes (abbreviated xattr) is a filesystem feature
- that enables users/programs to associate files/dirs with metadata.
-
-
-**FUSE**
-: Filesystem in Userspace (FUSE) is a loadable kernel module for
- Unix-like computer operating systems that lets non-privileged users
- create their own filesystems without editing kernel code. This is
- achieved by running filesystem code in user space while the FUSE
- module provides only a "bridge" to the actual kernel interfaces.
-
- Source: [Wikipedia][1]
-
-
-**GFID**
-: Each file/directory on a GlusterFS volume has a unique 128-bit number
- associated with it called the GFID. This is analogous to inode in a
- regular filesystem.
-
-
-**Infiniband**
-: InfiniBand is a switched fabric computer network communications link
-  used in high-performance computing and enterprise data centers.
-
-**Metadata**
-: Metadata is data providing information about one or more other
- pieces of data.
-
-**Namespace**
-: Namespace is an abstract container or environment created to hold a
- logical grouping of unique identifiers or symbols. Each Gluster
- volume exposes a single namespace as a POSIX mount point that
- contains every file in the cluster.
-
-**Node**
-: A server or computer that hosts one or more bricks.
-
-**Open Source**
-: Open source describes practices in production and development that
- promote access to the end product's source materials. Some consider
- open source a philosophy, others consider it a pragmatic
- methodology.
-
- Before the term open source became widely adopted, developers and
- producers used a variety of phrases to describe the concept; open
- source gained hold with the rise of the Internet, and the attendant
- need for massive retooling of the computing source code.
-
- Opening the source code enabled a self-enhancing diversity of
- production models, communication paths, and interactive communities.
- Subsequently, a new, three-word phrase "open source software" was
- born to describe the environment that the new copyright, licensing,
- domain, and consumer issues created.
-
- Source: [Wikipedia][2]
-
-**Petabyte**
-: A petabyte (derived from the SI prefix peta-) is a unit of
- information equal to one quadrillion (short scale) bytes, or 1000
- terabytes. The unit symbol for the petabyte is PB. The prefix peta-
- (P) indicates a power of 1000:
-
-  1 PB = 1,000,000,000,000,000 B = 1000^5 B = 10^15 B.
-
- The term "pebibyte" (PiB), using a binary prefix, is used for the
- corresponding power of 1024.
-
- Source: [Wikipedia][3]
-
-
-
-**Quorum**
-: The configuration of quorum in a trusted storage pool determines the
- number of server failures that the trusted storage pool can sustain.
- If an additional failure occurs, the trusted storage pool becomes
- unavailable.
-
-**Quota**
-: Quota allows you to set limits on usage of disk space by directories or
- by volumes.
-
-**RAID**
-: Redundant Array of Inexpensive Disks (RAID) is a technology that
- provides increased storage reliability through redundancy, combining
-  multiple low-cost, less-reliable disk drive components into a
- logical unit where all drives in the array are interdependent.
-
-**RDMA**
-: Remote direct memory access (RDMA) is a direct memory access from the
- memory of one computer into that of another without involving either
- one's operating system. This permits high-throughput, low-latency
- networking, which is especially useful in massively parallel computer
- clusters.
-
-**Rebalance**
-: A process of fixing the layout and redistributing data in a volume when a
- brick is added or removed.
-
-**RRDNS**
-: Round Robin Domain Name Service (RRDNS) is a method to distribute
- load across application servers. RRDNS is implemented by creating
- multiple A records with the same name and different IP addresses in
- the zone file of a DNS server.
-
-**Samba**
-: Samba allows file and print sharing between computers running Windows and
- computers running Linux. It is an implementation of several services and
- protocols including SMB and CIFS.
-
-**Self-Heal**
-: The self-heal daemon that runs in the background, identifies
- inconsistencies in files/dirs in a replicated volume and then resolves
- or heals them. This healing process is usually required when one or more
-  bricks of a volume go down and then come back up later.
-
-**Split-brain**
-: This is a situation where data on two or more bricks in a replicated
-  volume starts to diverge in terms of content or metadata. In this state,
-  one cannot determine programmatically which set of data is "right" and
- which is "wrong".
-
-**Translator**
-: Translators (also called xlators) are stackable modules where each
- module has a very specific purpose. Translators are stacked in a
-  hierarchical structure called a graph. A translator receives data
- from its parent translator, performs necessary operations and then
-  passes the data down to its child translator in the hierarchy.
-
-**Trusted Storage Pool**
-: A storage pool is a trusted network of storage servers. When you
- start the first server, the storage pool consists of that server
- alone.
-
-**Scale-Up Storage**
-: Increases the capacity of the storage device in a single dimension.
- For example, adding additional disk capacity to an existing trusted storage pool.
-
-**Scale-Out Storage**
-: Scale-out systems are designed to scale in both capacity and performance.
-  For example, adding more systems of the same size, or adding servers to a trusted
-  storage pool, increases CPU, disk capacity, and throughput for the trusted storage pool.
-
-**Userspace**
-: Applications running in user space don’t directly interact with
- hardware, instead using the kernel to moderate access. Userspace
- applications are generally more portable than applications in kernel
- space. Gluster is a user space application.
-
-
-**Geo-Replication**
-: Geo-replication provides a continuous, asynchronous, and incremental
- replication service from site to another over Local Area Networks
- (LAN), Wide Area Network (WAN), and across the Internet.
-
-**N-way Replication**
-: Local synchronous data replication which is typically deployed across campus
- or Amazon Web Services Availability Zones.
-
-**Distributed Hash Table Terminology**
-**Hashed subvolume**
-: The Distributed Hash Table Translator subvolume to which the file or directory name hashes.
-
-**Cached subvolume**
-: A Distributed Hash Table Translator subvolume where the file content is actually present.
- For directories, the concept of cached-subvolume is not relevant. It is loosely used to mean
- subvolumes which are not hashed-subvolume.
-
-**Linkto-file**
-
-: For a newly created file, the hashed and cached subvolumes are the same.
- When directory entry operations like rename (which can change the name and hence hashed
- subvolume of the file) are performed on the file, instead of moving the entire data in the file
- to a new hashed subvolume, a file is created with the same name on the newly hashed subvolume.
- The purpose of this file is only to act as a pointer to the node where the data is present.
- In the extended attributes of this file, the name of the cached subvolume is stored.
- This file on the newly hashed-subvolume is called a linkto-file.
- The linkto file is relevant only for non-directory entities.
-
-**Directory Layout**
-: The directory layout specifies, for a directory, which hash-ranges correspond to
-  which subvolumes.
-
-**Properties of directory layouts:**
-: The layouts are created at the time of directory creation and are persisted as extended attributes
- of the directory.
- A subvolume is not included in the layout if it remained offline at the time of directory creation
-  and no directory entries (such as files and directories) of that directory are created on
- that subvolume. The subvolume is not part of the layout until the fix-layout is complete
- as part of running the rebalance command. If a subvolume is down during access (after directory creation),
- access to any files that hash to that subvolume fails.
-
-**Fix Layout**
-: A command that is executed during the rebalance process.
-  The rebalance process itself comprises two stages:
-
-  1. Fix the layouts of directories to accommodate any subvolumes that are added or removed.
-     This stage also heals the directories, checks whether the layout is non-contiguous, and persists the
-     layout in extended attributes, if needed. It also ensures that the directories have the same
-     attributes across all the subvolumes.
-
-  2. Migrate the data from the cached-subvolume to the hashed-subvolume.
-
- [Wikipedia]: http://en.wikipedia.org/wiki/Filesystem
- [1]: http://en.wikipedia.org/wiki/Filesystem_in_Userspace
- [2]: http://en.wikipedia.org/wiki/Open_source
- [3]: http://en.wikipedia.org/wiki/Petabyte
diff --git a/doc/admin-guide/en-US/markdown/glusterfs_introduction.md b/doc/admin-guide/en-US/markdown/glusterfs_introduction.md
deleted file mode 100644
index 02334f7b108..00000000000
--- a/doc/admin-guide/en-US/markdown/glusterfs_introduction.md
+++ /dev/null
@@ -1,63 +0,0 @@
-Introducing Gluster File System
-===============================
-
-GlusterFS is an open source, distributed file system capable of scaling to
-several petabytes and handling thousands of clients. It is a file system with
-a modular, stackable design, and a unique no-metadata server architecture.
-This no-metadata server architecture ensures better performance,
-linear scalability, and reliability. GlusterFS can be
-flexibly combined with commodity physical, virtual, and cloud resources
-to deliver highly available and performant enterprise storage at a
-fraction of the cost of traditional solutions.
-
-GlusterFS clusters together storage building blocks over Infiniband RDMA
-and/or TCP/IP interconnect, aggregating disk and memory resources and
-managing data in a single global namespace.
-
-GlusterFS aggregates various storage servers over network interconnects
-into one large parallel network file system. Based on a stackable user space
-design, it delivers exceptional performance for diverse workloads.
-The POSIX-compatible GlusterFS servers use any on-disk file system which supports
-extended attributes (e.g. ext4, XFS) to store data on disks, and can be
-accessed using industry-standard access protocols including Network File System (NFS)
-and Server Message Block (SMB).
-
-![ Virtualized Cloud Environments ](../images/640px-GlusterFS_Architecture.png)
-
-GlusterFS is designed for today's high-performance, virtualized cloud
-environments. Unlike traditional data centers, cloud environments
-require multi-tenancy along with the ability to grow or shrink resources
-on demand. Enterprises can scale capacity, performance, and availability
-on demand, with no vendor lock-in, across on-premise, public cloud, and
-hybrid environments.
-
-GlusterFS is in production at thousands of enterprises spanning media,
-healthcare, government, education, web 2.0, and financial services.
-
-## Commercial offerings and support ##
-
-Several companies offer support or consulting - http://www.gluster.org/consultants/.
-
-Red Hat Storage (http://www.redhat.com/en/technologies/storage/storage-server)
-is a commercial storage software product, based on GlusterFS.
-
-
-## About On-premise Installation ##
-
-GlusterFS for On-Premise allows physical storage to be utilized as a
-virtualized, scalable, and centrally managed pool of storage.
-
-GlusterFS can be installed on commodity servers resulting in a
-powerful, massively scalable, and highly available NAS environment.
-
-GlusterFS On-premise enables enterprises to treat physical storage as a
-virtualized, scalable, and centrally managed storage pool by using commodity
-storage hardware. It supports multi-tenancy by partitioning users or groups into
-logical volumes on shared storage. It enables users to eliminate, decrease, or
-manage their dependence on high-cost, monolithic and difficult-to-deploy storage arrays.
-You can add capacity in a matter of minutes across a wide variety of workloads without
-affecting performance. Storage can also be centrally managed across a variety of
-workloads, thus increasing storage efficiency.
-
-
diff --git a/doc/admin-guide/en-US/markdown/pdfgen.sh b/doc/admin-guide/en-US/markdown/pdfgen.sh
deleted file mode 100755
index 68b320617b1..00000000000
--- a/doc/admin-guide/en-US/markdown/pdfgen.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-# pdfgen.sh simple pdf generation helper script.
-# Copyright (C) 2012-2013 James Shubin
-# Written by James Shubin <james@shubin.ca>
-
-#dir='/tmp/pdf'
-dir="$(pwd)/output/"
-# symlink the shared images directory so relative image paths resolve
-ln -sf ../images images
-mkdir -p "$dir"
-
-# convert every markdown file in this directory to a pdf in $dir
-for i in *.md; do
-    pandoc "$i" -o "${dir}$(echo "$i" | sed 's/\.md$/\.pdf/')"
-done
-
-rm images # remove symlink
-
diff --git a/doc/debugging/gfid-to-path.md b/doc/debugging/gfid-to-path.md
deleted file mode 100644
index 09c459e52c8..00000000000
--- a/doc/debugging/gfid-to-path.md
+++ /dev/null
@@ -1,73 +0,0 @@
-#Convert GFID to Path
-
-The GlusterFS internal file identifier (GFID) is a UUID that is unique to each
-file across the entire cluster. It is analogous to an inode number in a
-normal filesystem. The GFID of a file is stored in its xattr named
-`trusted.gfid`.
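-
-The raw GFID of a file can also be read directly on a brick, for example (the
-brick path below is illustrative):
-~~~
-# brick path is illustrative
-getfattr -n trusted.gfid -e hex /mnt/brick-test/b/dir/file
-~~~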
-
-####Special mount using [gfid-access translator][1]:
-~~~
-mount -t glusterfs -o aux-gfid-mount vm1:test /mnt/testvol
-~~~
-
-This assumes you have the `GFID` of a file from the changelog (or somewhere else).
-To try this out, you can get the `GFID` of a file from the mountpoint:
-~~~
-getfattr -n glusterfs.gfid.string /mnt/testvol/dir/file
-~~~
-
-
----
-###Get file path from GFID (Method 1):
-**(Lists hardlinks delimited by `:`, returns path as seen from mountpoint)**
-
-####Turn on build-pgfid option
-~~~
-gluster volume set test build-pgfid on
-~~~
-Read virtual xattr `glusterfs.ancestry.path` which contains the file path
-~~~
-getfattr -n glusterfs.ancestry.path -e text /mnt/testvol/.gfid/<GFID>
-~~~
-
-**Example:**
-~~~
-[root@vm1 glusterfs]# ls -il /mnt/testvol/dir/
-total 1
-10610563327990022372 -rw-r--r--. 2 root root 3 Jul 17 18:05 file
-10610563327990022372 -rw-r--r--. 2 root root 3 Jul 17 18:05 file3
-
-[root@vm1 glusterfs]# getfattr -n glusterfs.gfid.string /mnt/testvol/dir/file
-getfattr: Removing leading '/' from absolute path names
-# file: mnt/testvol/dir/file
-glusterfs.gfid.string="11118443-1894-4273-9340-4b212fa1c0e4"
-
-[root@vm1 glusterfs]# getfattr -n glusterfs.ancestry.path -e text /mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4
-getfattr: Removing leading '/' from absolute path names
-# file: mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4
-glusterfs.ancestry.path="/dir/file:/dir/file3"
-~~~
-
----
-###Get file path from GFID (Method 2):
-**(Does not list all hardlinks, returns backend brick path)**
-~~~
-getfattr -n trusted.glusterfs.pathinfo -e text /mnt/testvol/.gfid/<GFID>
-~~~
-
-**Example:**
-~~~
-[root@vm1 glusterfs]# getfattr -n trusted.glusterfs.pathinfo -e text /mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4
-getfattr: Removing leading '/' from absolute path names
-# file: mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4
-trusted.glusterfs.pathinfo="(<DISTRIBUTE:test-dht> <POSIX(/mnt/brick-test/b):vm1:/mnt/brick-test/b/dir//file3>)"
-~~~
-
----
-###Get file path from GFID (Method 3):
-https://gist.github.com/semiosis/4392640
-
----
-####References and links:
-[posix: placeholders for GFID to path conversion](http://review.gluster.org/5951)
-[1]: https://github.com/gluster/glusterfs/blob/master/doc/features/gfid-access.md
diff --git a/doc/debugging/split-brain.md b/doc/debugging/split-brain.md
deleted file mode 100644
index b0d938e26bc..00000000000
--- a/doc/debugging/split-brain.md
+++ /dev/null
@@ -1,251 +0,0 @@
-Steps to recover from File split-brain.
-======================================
-
-Quick Start:
-============
-1. Get the path of the file that is in split-brain:
-> It can be obtained either by
-> a) running the command `gluster volume heal info split-brain`, or
-> b) identifying the files for which file operations performed
-   from the client keep failing with an Input/Output error.
-
-2. Close the applications that opened this file from the mount point.
-In case of VMs, they need to be powered-off.
-
-3. Decide on the correct copy:
-> This is done by observing the afr changelog extended attributes of the file on
-the bricks using the getfattr command; then identifying the type of split-brain
-(data split-brain, metadata split-brain, entry split-brain or split-brain due to
-gfid-mismatch); and finally determining which of the bricks contains the 'good copy'
-of the file.
-> `getfattr -d -m . -e hex <file-path-on-brick>`.
-It is also possible that one brick might contain the correct data while the
-other might contain the correct metadata.
-
-4. Reset the relevant extended attribute on the brick(s) that contains the
-'bad copy' of the file data/metadata using the setfattr command.
-> `setfattr -n <attribute-name> -v <attribute-value> <file-path-on-brick>`
-
-5. Trigger self-heal on the file by performing lookup from the client:
-> `ls -l <file-path-on-gluster-mount>`
-
-Detailed Instructions for steps 3 through 5:
-===========================================
-To understand how to resolve split-brain we need to know how to interpret the
-afr changelog extended attributes.
-
-Execute `getfattr -d -m . -e hex <file-path-on-brick>`
-
-* Example:
-[root@store3 ~]# getfattr -d -e hex -m. brick-a/file.txt
-\#file: brick-a/file.txt
-security.selinux=0x726f6f743a6f626a6563745f723a66696c655f743a733000
-trusted.afr.vol-client-2=0x000000000000000000000000
-trusted.afr.vol-client-3=0x000000000200000000000000
-trusted.gfid=0x307a5c9efddd4e7c96e94fd4bcdcbd1b
-
-The extended attributes with `trusted.afr.<volname>-client-<subvolume-index>`
-are used by afr to maintain the changelog of the file. The values of the
-`trusted.afr.<volname>-client-<subvolume-index>` are calculated by the glusterfs
-client (fuse or nfs-server) processes. When the glusterfs client modifies a file
-or directory, the client contacts each brick and updates the changelog extended
-attribute according to the response of the brick.
-
-'subvolume-index' is nothing but (brick number - 1) in
-`gluster volume info <volname>` output.
-
-* Example:
-[root@pranithk-laptop ~]# gluster volume info vol
- Volume Name: vol
- Type: Distributed-Replicate
- Volume ID: 4f2d7849-fbd6-40a2-b346-d13420978a01
- Status: Created
- Number of Bricks: 4 x 2 = 8
- Transport-type: tcp
- Bricks:
- brick-a: pranithk-laptop:/gfs/brick-a
- brick-b: pranithk-laptop:/gfs/brick-b
- brick-c: pranithk-laptop:/gfs/brick-c
- brick-d: pranithk-laptop:/gfs/brick-d
- brick-e: pranithk-laptop:/gfs/brick-e
- brick-f: pranithk-laptop:/gfs/brick-f
- brick-g: pranithk-laptop:/gfs/brick-g
- brick-h: pranithk-laptop:/gfs/brick-h
-
-In the example above:
-```
-Brick | Replica set | Brick subvolume index
-----------------------------------------------------------------------------
--/gfs/brick-a | 0 | 0
--/gfs/brick-b | 0 | 1
--/gfs/brick-c | 1 | 2
--/gfs/brick-d | 1 | 3
--/gfs/brick-e | 2 | 4
--/gfs/brick-f | 2 | 5
--/gfs/brick-g | 3 | 6
--/gfs/brick-h | 3 | 7
-```
-
-Each file in a brick maintains the changelog of itself and that of the files
-present in all the other bricks in its replica set as seen by that brick.
-
-In the example volume given above, all files in brick-a will have 2 entries,
-one for itself and the other for the file present in its replica pair, i.e. brick-b:
-trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for itself (brick-a)
-trusted.afr.vol-client-1=0x000000000000000000000000 -->changelog for brick-b as seen by brick-a
-
-Likewise, all files in brick-b will have:
-trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for brick-a as seen by brick-b
-trusted.afr.vol-client-1=0x000000000000000000000000 -->changelog for itself (brick-b)
-
-The same can be extended for other replica pairs.
-
-Interpreting Changelog (roughly pending operation count) Value:
-Each extended attribute has a value which is 24 hexadecimal digits.
-The first 8 digits represent the changelog of data. The second 8 digits represent
-the changelog of metadata. The last 8 digits represent the changelog of directory entries.
-
-Pictorially representing the same, we have:
-```
-0x 000003d7 00000001 00000000
- | | |
- | | \_ changelog of directory entries
- | \_ changelog of metadata
- \ _ changelog of data
-```
-
-
-For directories, metadata and entry changelogs are valid.
-For regular files, data and metadata changelogs are valid.
-For special files like device files, the metadata changelog is valid.
-When a file split-brain happens it could be either a data split-brain, a
-metadata split-brain, or both. When a split-brain happens, the changelog of the
-file would be something like this:
-
-* Example: (Let's consider both data and metadata split-brain on the same file.)
-[root@pranithk-laptop vol]# getfattr -d -m . -e hex /gfs/brick-?/a
-getfattr: Removing leading '/' from absolute path names
-\#file: gfs/brick-a/a
-trusted.afr.vol-client-0=0x000000000000000000000000
-trusted.afr.vol-client-1=0x000003d70000000100000000
-trusted.gfid=0x80acdbd886524f6fbefa21fc356fed57
-\#file: gfs/brick-b/a
-trusted.afr.vol-client-0=0x000003b00000000100000000
-trusted.afr.vol-client-1=0x000000000000000000000000
-trusted.gfid=0x80acdbd886524f6fbefa21fc356fed57
-
-###Observations:
-
-####According to changelog extended attributes on file /gfs/brick-a/a:
-The first 8 digits of trusted.afr.vol-client-0 are all
-zeros (0x00000000................), and the first 8 digits of
-trusted.afr.vol-client-1 are not all zeros (0x000003d7................).
-So the changelog on /gfs/brick-a/a implies that some data operations succeeded
-on itself but failed on /gfs/brick-b/a.
-
-The second 8 digits of trusted.afr.vol-client-0 are
-all zeros (0x........00000000........), and the second 8 digits of
-trusted.afr.vol-client-1 are not all zeros (0x........00000001........).
-So the changelog on /gfs/brick-a/a implies that some metadata operations succeeded
-on itself but failed on /gfs/brick-b/a.
-
-####According to Changelog extended attributes on file /gfs/brick-b/a:
-The first 8 digits of trusted.afr.vol-client-0 are not all
-zeros (0x000003b0................), and the first 8 digits of
-trusted.afr.vol-client-1 are all zeros (0x00000000................).
-So the changelog on /gfs/brick-b/a implies that some data operations succeeded
-on itself but failed on /gfs/brick-a/a.
-
-The second 8 digits of trusted.afr.vol-client-0 are not
-all zeros (0x........00000001........), and the second 8 digits of
-trusted.afr.vol-client-1 are all zeros (0x........00000000........).
-So the changelog on /gfs/brick-b/a implies that some metadata operations succeeded
-on itself but failed on /gfs/brick-a/a.
-
-Since both the copies have data, metadata changes that are not on the other
-file, it is in both data and metadata split-brain.
-
-Deciding on the correct copy:
------------------------------
-The user may have to inspect stat,getfattr output of the files to decide which
-metadata to retain and contents of the file to decide which data to retain.
-Continuing with the example above, lets say we want to retain the data
-of /gfs/brick-a/a and metadata of /gfs/brick-b/a.
-
-Resetting the relevant changelogs to resolve the split-brain:
--------------------------------------------------------------
-For resolving data-split-brain:
-We need to change the changelog extended attributes on the files as if some data
-operations succeeded on /gfs/brick-a/a but failed on /gfs/brick-b/a. But
-/gfs/brick-b/a should NOT have any changelog which says some data operations
-succeeded on /gfs/brick-b/a but failed on /gfs/brick-a/a. We need to reset the
-data part of the changelog on trusted.afr.vol-client-0 of /gfs/brick-b/a.
-
-For resolving metadata-split-brain:
-We need to change the changelog extended attributes on the files as if some
-metadata operations succeeded on /gfs/brick-b/a but failed on /gfs/brick-a/a.
-But /gfs/brick-a/a should NOT have any changelog which says some metadata
-operations succeeded on /gfs/brick-a/a but failed on /gfs/brick-b/a.
-We need to reset metadata part of the changelog on
-trusted.afr.vol-client-1 of /gfs/brick-a/a
-
-So, the intended changes are:
-On /gfs/brick-b/a:
-For trusted.afr.vol-client-0
-0x000003b00000000100000000 to 0x000000000000000100000000
-(Note that the metadata part is still not all zeros)
-Hence execute
-`setfattr -n trusted.afr.vol-client-0 -v 0x000000000000000100000000 /gfs/brick-b/a`
-
-On /gfs/brick-a/a:
-For trusted.afr.vol-client-1
-0x000003d70000000100000000 to 0x000003d70000000000000000
-(Note that the data part is still not all zeros)
-Hence execute
-`setfattr -n trusted.afr.vol-client-1 -v 0x000003d70000000000000000 /gfs/brick-a/a`
-
-Thus after the above operations are done, the changelogs look like this:
-[root@pranithk-laptop vol]# getfattr -d -m . -e hex /gfs/brick-?/a
-getfattr: Removing leading '/' from absolute path names
-\#file: gfs/brick-a/a
-trusted.afr.vol-client-0=0x000000000000000000000000
-trusted.afr.vol-client-1=0x000003d70000000000000000
-trusted.gfid=0x80acdbd886524f6fbefa21fc356fed57
-
-\#file: gfs/brick-b/a
-trusted.afr.vol-client-0=0x000000000000000100000000
-trusted.afr.vol-client-1=0x000000000000000000000000
-trusted.gfid=0x80acdbd886524f6fbefa21fc356fed57
-
-
-Triggering Self-heal:
----------------------
-Perform `ls -l <file-path-on-gluster-mount>` to trigger healing.
-
-Fixing Directory entry split-brain:
-----------------------------------
-Afr has the ability to conservatively merge the different entries of a directory
-when there is a split-brain on the directory.
-If directory 'd' has entries '1', '2' on one brick and entries '3', '4' on
-the other brick, then afr will merge all of the entries so that the directory
-has entries '1', '2', '3', '4' on both bricks.
-(Note: this may result in deleted files re-appearing in case the split-brain
-happened because of deletion of files in the directory.)
-Split-brain resolution needs human intervention when there is at least one entry
-which has the same file name but a different gfid in that directory.
-Example:
-On brick-a the directory has entries '1' (with gfid g1), '2' and on brick-b
-directory has entries '1' (with gfid g2) and '3'.
-These kinds of directory split-brains need human intervention to resolve.
-The user needs to remove either the file '1' on brick-a or the file '1' on brick-b
-to resolve the split-brain. In addition, the corresponding gfid-link file also
-needs to be removed. The gfid-link files are present in the .glusterfs folder
-in the top-level directory of the brick. If the gfid of the file is
-0x307a5c9efddd4e7c96e94fd4bcdcbd1b (the trusted.gfid extended attribute obtained
-from the getfattr command earlier), the gfid-link file can be found at
-> /gfs/brick-a/.glusterfs/30/7a/307a5c9efddd4e7c96e94fd4bcdcbd1b
-
-####Word of caution:
-Before deleting the gfid-link, we have to ensure that there are no hard links
-to the file present on that brick. If hard links exist, they must be deleted as
-well.
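-
-As an illustration only (the gfid and brick path are taken from the example above; adjust them for your setup), the gfid-link can be located and its hard-link count inspected before removal:
-```
-gfid=307a5c9efddd4e7c96e94fd4bcdcbd1b
-brick=/gfs/brick-a
-stat -c 'hard links: %h' "$brick/.glusterfs/${gfid:0:2}/${gfid:2:2}/$gfid"
-# a regular file on a brick normally shows 2 links (its name + the gfid-link);
-# a larger count suggests extra hard links that must be dealt with first
-```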
diff --git a/doc/debugging/statedump.md b/doc/debugging/statedump.md
deleted file mode 100644
index f34a5c3436a..00000000000
--- a/doc/debugging/statedump.md
+++ /dev/null
@@ -1,389 +0,0 @@
-#Statedump
-A statedump is a file generated by a glusterfs process that captures the state of its internal data structures: active inodes, fds, mempools, iobufs, per-xlator memory allocation stats of the different data types, and so on.
-
-##How to generate statedump
-We can find the directory where statedump files are created using the `gluster --print-statedumpdir` command.
-Create that directory if it is not already present; its location depends on the type of installation.
-Let's call this directory `statedump-directory`.
-
-We can generate a statedump using `kill -USR1 <pid-of-gluster-process>`,
-where the gluster process is a glusterd/glusterfs/glusterfsd process.
-
-There are also commands to generate statedumps for brick processes, the NFS server and quotad:
-
-For bricks: `gluster volume statedump <volname>`
-
-For nfs server: `gluster volume statedump <volname> nfs`
-
-For quotad: `gluster volume statedump <volname> quotad`
-
-For brick processes, files will be created in `statedump-directory` with the file name `hyphenated-brick-path.<pid>.dump.timestamp`. For all other processes the name will be `glusterdump.<pid>.dump.timestamp`.
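-
-Putting the above together, a typical (illustrative) sequence for a brick process could look like this; `testvol` is only an example volume name and the dump directory may differ on your installation:
-```
-gluster --print-statedumpdir        # e.g. /var/run/gluster
-gluster volume statedump testvol    # statedump of all bricks of volume 'testvol'
-ls /var/run/gluster/*.dump.*        # the newly generated statedump files
-```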
-
-##How to read statedump
-We shall see snippets of each type of statedump.
-
-The first and last lines of the file have the starting and ending times of writing the statedump file. Times are in UTC.
-
-###Mallinfo
-The mallinfo return status is printed in the following format. Please read `man mallinfo` for more information about what each field means.
-```
-[mallinfo]
-mallinfo_arena=100020224 /* Non-mmapped space allocated (bytes) */
-mallinfo_ordblks=69467 /* Number of free chunks */
-mallinfo_smblks=449 /* Number of free fastbin blocks */
-mallinfo_hblks=13 /* Number of mmapped regions */
-mallinfo_hblkhd=20144128 /* Space allocated in mmapped regions (bytes) */
-mallinfo_usmblks=0 /* Maximum total allocated space (bytes) */
-mallinfo_fsmblks=39264 /* Space in freed fastbin blocks (bytes) */
-mallinfo_uordblks=96710112 /* Total allocated space (bytes) */
-mallinfo_fordblks=3310112 /* Total free space (bytes) */
-mallinfo_keepcost=133712 /* Top-most, releasable space (bytes) */
-```
-
-###Data structure allocation stats
-For every translator loaded in the call-graph, the memory usage per data structure is displayed in the following format:
-
-For xlator with name: glusterfs
-```
-[global.glusterfs - Memory usage] #[global.xlator-name - Memory usage]
-num_types=119 #It shows the number of data types it is using
-```
-
-Now for each data-type it prints the memory usage.
-
-```
-[global.glusterfs - usage-type gf_common_mt_gf_timer_t memusage]
-#[global.xlator-name - usage-type <tag associated with the data-type> memusage]
-size=112 #num_allocs times the sizeof(data-type) i.e. num_allocs * sizeof (data-type)
-num_allocs=2 #Number of allocations of the data-type which are active at the time of taking statedump.
-max_size=168 #max_num_allocs times the sizeof(data-type) i.e. max_num_allocs * sizeof (data-type)
-max_num_allocs=3 #Maximum number of active allocations at any point in the life of the process.
-total_allocs=7 #Number of times this data is allocated in the life of the process.
-```
-
-###Mempools
-
-Mempools are an optimization to reduce the number of allocations of a data type. If we create a mem-pool of, let's say, 1024 elements for a data type, new elements will be allocated from the heap using syscalls like calloc only if all 1024 elements in the pool are in active use.
-
-Memory pool allocated by each xlator is displayed in the following format:
-
-```
-[mempool] #Section name
------=-----
-pool-name=fuse:fd_t #pool-name=<xlator-name>:<data-type>
-hot-count=1 #number of mempool elements that are in active use. i.e. for this pool it is the number of 'fd_t' s in active use.
-cold-count=1023 #number of mempool elements that are not in use. If a new allocation is required it will be served from here until all the elements in the pool are in use i.e. cold-count becomes 0.
-padded_sizeof=108 #Each mempool element is padded with a doubly-linked-list + ptr of mempool + is-in-use info to operate the pool of elements, this size is the element-size after padding
-pool-misses=0 #Number of times the element had to be allocated from heap because all elements from the pool are in active use.
-alloc-count=314 #Number of times this type of data is allocated through out the life of this process. This may include pool-misses as well.
-max-alloc=3 #Maximum number of elements from the pool in active use at any point in the life of the process. This does *not* include pool-misses.
-cur-stdalloc=0 #Denotes the number of allocations made from heap once cold-count reaches 0, that are yet to be released via mem_put().
-max-stdalloc=0 #Maximum number of allocations from heap that are in active use at any point in the life of the process.
-```
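-
-As a quick, purely illustrative aid when hunting for pool leaks, the pool name can be printed next to its miss and stdalloc counters (the file name below is a placeholder):
-```
-grep -E '^(pool-name|pool-misses|cur-stdalloc)=' glusterdump.example.dump | paste - - -
-```
-Pools whose cur-stdalloc keeps growing across successive statedumps are good candidates for closer inspection.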
-
-###Iobufs
-```
-[iobuf.global]
-iobuf_pool=0x1f0d970 #The memory pool for iobufs
-iobuf_pool.default_page_size=131072 #The default size of iobuf (if no iobuf size is specified the default size is allocated)
-#iobuf_arena: One arena represents a group of iobufs of a particular size
-iobuf_pool.arena_size=12976128 # The initial size of the iobuf pool (doesn't include the stdalloc'd memory or the newly added arenas)
-iobuf_pool.arena_cnt=8 #Total number of arenas in the pool
-iobuf_pool.request_misses=0 #The number of iobufs that were stdalloc'd (as they exceeded the default max page size provided by iobuf_pool).
-```
-
-There are 3 lists of arenas
-
-1. Arena list: arenas allocated during iobuf pool creation and the arenas that are in use (active_cnt != 0) will be part of this list.
-2. Purge list: arenas that can be purged (no active iobufs, active_cnt == 0).
-3. Filled list: arenas without free iobufs.
-
-```
-[purge.1] #purge.<S.No.>
-purge.1.mem_base=0x7fc47b35f000 #The address of the arena structure
-purge.1.active_cnt=0 #The number of iobufs active in that arena
-purge.1.passive_cnt=1024 #The number of unused iobufs in the arena
-purge.1.alloc_cnt=22853 #Total allocs in this pool(number of times the iobuf was allocated from this arena)
-purge.1.max_active=7 #Max active iobufs from this arena, at any point in the life of this process.
-purge.1.page_size=128 #Size of all the iobufs in this arena.
-
-[arena.5] #arena.<S.No.>
-arena.5.mem_base=0x7fc47af1f000
-arena.5.active_cnt=0
-arena.5.passive_cnt=64
-arena.5.alloc_cnt=0
-arena.5.max_active=0
-arena.5.page_size=32768
-```
-
-If the active_cnt of any arena is non zero, then the statedump will also have the iobuf list.
-```
-[arena.6.active_iobuf.1] #arena.<S.No>.active_iobuf.<iobuf.S.No.>
-arena.6.active_iobuf.1.ref=1 #refcount of the iobuf
-arena.6.active_iobuf.1.ptr=0x7fdb921a9000 #address of the iobuf
-
-[arena.6.active_iobuf.2]
-arena.6.active_iobuf.2.ref=1
-arena.6.active_iobuf.2.ptr=0x7fdb92189000
-```
-
-At any given point in time if there are lots of filled arenas then that could be a sign of iobuf leaks.
-
-###Call stack
-All the fops received by gluster are handled using call-stacks. Call stack contains the information about uid/gid/pid etc of the process that is executing the fop. Each call-stack contains different call-frames per xlator which handles that fop.
-
-```
-[global.callpool.stack.3] #global.callpool.stack.<Serial-Number>
-stack=0x7fc47a44bbe0 #Stack address
-uid=0 #Uid of the process which is executing the fop
-gid=0 #Gid of the process which is executing the fop
-pid=6223 #Pid of the process which is executing the fop
-unique=2778 #Xlators like afr do copy_frame and perform the operation in a different stack, this id is useful to find out the stacks that are inter-related because of copy-frame
-lk-owner=0000000000000000 #Some of the fuse fops have lk-owner.
-op=LOOKUP #Fop
-type=1 #Type of the op i.e. FOP/MGMT-OP
-cnt=9 #Number of frames in this stack.
-```
-###Call-frame
-Each frame will have information about which xlator the frame belongs to, which function it was wound from/to and which function it will be unwound to. It also mentions whether the unwind happened or not. If we observe hangs in the system and want to find out which xlator is causing them, take a statedump and see which is the final xlator that is yet to be unwound.
-
-```
-[global.callpool.stack.3.frame.2]#global.callpool.stack.<stack-serial-number>.frame.<frame-serial-number>
-frame=0x7fc47a611dbc #Frame address
-ref_count=0 #Incremented at the time of wind and decremented at the time of unwind.
-translator=r2-client-1 #Xlator this frame belongs to
-complete=0 #if this value is 1 that means this frame is already unwound. 0 if it is yet to unwind.
-parent=r2-replicate-0 #Parent xlator of this frame
-wind_from=afr_lookup #Parent xlator function from which the wind happened
-wind_to=priv->children[i]->fops->lookup
-unwind_to=afr_lookup_cbk #Parent xlator function to which unwind happened
-```
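-
-To quickly list the frames that are yet to unwind, a rough (illustrative) grep over the statedump can be used; the file name is a placeholder:
-```
-grep -E 'frame=|translator=|complete=' glusterdump.example.dump | grep -B1 'complete=0'
-```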
-
-###History of operations in Fuse
-
-Fuse maintains history of operations that happened in fuse.
-
-```
-[xlator.mount.fuse.history]
-TIME=2014-07-09 16:44:57.523364
-message=[0] fuse_release: RELEASE(): 4590:, fd: 0x1fef0d8, gfid: 3afb4968-5100-478d-91e9-76264e634c9f
-
-TIME=2014-07-09 16:44:57.523373
-message=[0] send_fuse_err: Sending Success for operation 18 on inode 3afb4968-5100-478d-91e9-76264e634c9f
-
-TIME=2014-07-09 16:44:57.523394
-message=[0] fuse_getattr_resume: 4591, STAT, path: (/iozone.tmp), gfid: (3afb4968-5100-478d-91e9-76264e634c9f)
-```
-
-###Xlator configuration
-```
-[cluster/replicate.r2-replicate-0] #Xlator type, name information
-child_count=2 #Number of children to the xlator
-#Xlator specific configuration below
-child_up[0]=1
-pending_key[0]=trusted.afr.r2-client-0
-child_up[1]=1
-pending_key[1]=trusted.afr.r2-client-1
-data_self_heal=on
-metadata_self_heal=1
-entry_self_heal=1
-data_change_log=1
-metadata_change_log=1
-entry-change_log=1
-read_child=1
-favorite_child=-1
-wait_count=1
-```
-
-###Graph/inode table
-```
-[active graph - 1]
-
-conn.1.bound_xl./data/brick01a/homegfs.hashsize=14057
-conn.1.bound_xl./data/brick01a/homegfs.name=/data/brick01a/homegfs/inode
-conn.1.bound_xl./data/brick01a/homegfs.lru_limit=16384 #Least recently used size limit
-conn.1.bound_xl./data/brick01a/homegfs.active_size=690 #Number of inodes undergoing some kind of fop to be precise on which there is at least one ref.
-conn.1.bound_xl./data/brick01a/homegfs.lru_size=183 #Number of inodes present in lru list
-conn.1.bound_xl./data/brick01a/homegfs.purge_size=0 #Number of inodes present in purge list
-```
-
-###Inode
-```
-[conn.1.bound_xl./data/brick01a/homegfs.active.324] #324th inode in active inode list
-gfid=e6d337cf-97eb-44b3-9492-379ba3f6ad42 #Gfid of the inode
-nlookup=13 #Number of times lookups happened from the client or from fuse kernel
-fd-count=4 #Number of fds opened on the inode
-ref=11 #Number of refs taken on the inode
-ia_type=1 #Type of the inode. This should be changed to some string :-(
-
-[conn.1.bound_xl./data/brick01a/homegfs.lru.1] #1st inode in lru list. Note that ref count is zero for these inodes.
-gfid=5114574e-69bc-412b-9e52-f13ff087c6fc
-nlookup=5
-fd-count=0
-ref=0
-ia_type=2
-```
-###Inode context
-For each inode per xlator some context could be stored. This context can also be printed in the statedump. Here is the inode ctx of locks xlator
-```
-[xlator.features.locks.homegfs-locks.inode]
-path=/homegfs/users/dfrobins/gfstest/r4/SCRATCH/fort.5102 - path of the file
-mandatory=0
-inodelk-count=5 #Number of inode locks
-lock-dump.domain.domain=homegfs-replicate-0:self-heal #Domain name where self-heals take locks to prevent more than one heal on the same file
-inodelk.inodelk[0](ACTIVE)=type=WRITE, whence=0, start=0, len=0, pid = 18446744073709551615, owner=080b1ada117f0000, client=0xb7fc30, connection-id=compute-30-029.com-3505-2014/06/29-14:46:12:477358-homegfs-client-0-0-1, granted at Sun Jun 29 11:01:00 2014 #Active lock information
-
-inodelk.inodelk[1](BLOCKED)=type=WRITE, whence=0, start=0, len=0, pid = 18446744073709551615, owner=c0cb091a277f0000, client=0xad4f10, connection-id=gfs01a.com-4080-2014/06/29-14:41:36:917768-homegfs-client-0-0-0, blocked at Sun Jun 29 11:04:44 2014 #Blocked lock information
-
-lock-dump.domain.domain=homegfs-replicate-0:metadata #Domain name where metadata operations take locks to maintain replication consistency
-lock-dump.domain.domain=homegfs-replicate-0 #Domain name where entry/data operations take locks to maintain replication consistency
-inodelk.inodelk[0](ACTIVE)=type=WRITE, whence=0, start=11141120, len=131072, pid = 18446744073709551615, owner=080b1ada117f0000, client=0xb7fc30, connection-id=compute-30-029.com-3505-2014/06/29-14:46:12:477358-homegfs-client-0-0-1, granted at Sun Jun 29 11:10:36 2014 #Active lock information
-```
-
-##FAQ
-###How to debug Memory leaks using statedump?
-
-####Using memory accounting feature:
-
-`https://bugzilla.redhat.com/show_bug.cgi?id=1120151` is one of the bugs which was debugged using statedump to see which data structure is leaking. Here is the process that was used to find the leak using statedump. According to the bug, the observation is that the process memory usage is increasing whenever one of the bricks is wiped in a replicate volume and a `full` self-heal is invoked to heal the contents. A statedump of the process is taken using kill -USR1 `<pid-of-gluster-self-heal-daemon>`.
-```
-grep -w num_allocs glusterdump.5225.dump.1405493251
-num_allocs=77078
-num_allocs=87070
-num_allocs=117376
-....
-
-grep hot-count glusterdump.5225.dump.1405493251
-hot-count=16384
-hot-count=16384
-hot-count=4095
-....
-```
-
-Find the occurrences of these counts in the statedump file to figure out the corresponding data-type tags.
-A grep of the statedump revealed too many allocations for the following data types under replicate:
-
-1. gf_common_mt_asprintf
-2. gf_common_mt_char
-3. gf_common_mt_mem_pool.
-
-After checking the afr code for allocations with tag `gf_common_mt_char`, the `data-self-heal` code path was found not to free one such allocated memory. `gf_common_mt_mem_pool` suggests that there is a leak in pool memory. The `replicate-0:dict_t`, `glusterfs:data_t` and `glusterfs:data_pair_t` pools are using a lot of memory, i.e. cold_count is `0` and there are too many allocations. Checking the source code of dict.c revealed that `key` in `dict` is allocated with `gf_common_mt_char` (tag `2.` above) and the value is created using gf_asprintf, which in turn uses `gf_common_mt_asprintf` (tag `1.` above). Browsing the code for leaks in the self-heal code paths led to a line which overwrites a variable with a new dictionary even when it was already holding a reference to another dictionary. After fixing these leaks, the same test was run to verify that none of the `num_allocs` values increase, even after healing a 10,000-file directory hierarchy, in the statedump of the self-heal daemon.
-Please check http://review.gluster.org/8316 for more info about the patch/code.
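-
-More generally, a minimal, purely illustrative way of comparing per-type allocation counts between two statedumps (the file names are placeholders) is:
-```
-for f in dump.before dump.after; do
-    # print "<data-type> num_allocs=<n>" for every memusage section
-    awk '/usage-type/ {tag=$4} /^num_allocs=/ {print tag, $0}' "$f" | sort > "$f.allocs"
-done
-diff dump.before.allocs dump.after.allocs
-```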
-
-####Debugging leaks in memory pools:
-Statedump output of memory pools was used to test and verify the fixes to https://bugzilla.redhat.com/show_bug.cgi?id=1134221. On code analysis, dict_t objects were found to be leaking (in terms of not being unref'd enough times) during name self-heal. The test involved creating 100 files on a plain replicate volume, removing them from the backend of one of the bricks, and then triggering lookup on them from the mount point. A statedump of the mount process was taken before and after executing the test case, after compiling glusterfs with -DDEBUG flags (to have cold count set to 0 by default).
-
-Statedump output of the fuse mount process before the test case was executed:
-
-```
-
-pool-name=glusterfs:dict_t
-hot-count=0
-cold-count=0
-padded_sizeof=140
-alloc-count=33
-max-alloc=0
-pool-misses=33
-cur-stdalloc=14
-max-stdalloc=18
-
-```
-Statedump output of the fuse mount process after the test case was executed:
-
-```
-
-pool-name=glusterfs:dict_t
-hot-count=0
-cold-count=0
-padded_sizeof=140
-alloc-count=2841
-max-alloc=0
-pool-misses=2841
-cur-stdalloc=214
-max-stdalloc=220
-
-```
-Here, with cold count being 0 by default, cur-stdalloc indicated the number of dict_t objects that were allocated in heap using mem_get(), and yet to be freed using mem_put() (refer to https://github.com/gluster/glusterfs/blob/master/doc/data-structures/mem-pool.md for more details on how mempool works). After the test case (name selfheal of 100 files), there was a rise in the cur-stdalloc value (from 14 to 214) for dict_t.
-
-After these leaks were fixed, glusterfs was again compiled with -DDEBUG flags, the same steps were performed again, and a statedump of the mount was taken before and after executing the test case. This was done to ascertain the validity of the fix. The following are the results:
-
-Statedump output of the fuse mount process before executing the test case:
-
-```
-pool-name=glusterfs:dict_t
-hot-count=0
-cold-count=0
-padded_sizeof=140
-alloc-count=33
-max-alloc=0
-pool-misses=33
-cur-stdalloc=14
-max-stdalloc=18
-
-```
-Statedump output of the fuse mount process after executing the test case:
-
-```
-pool-name=glusterfs:dict_t
-hot-count=0
-cold-count=0
-padded_sizeof=140
-alloc-count=2837
-max-alloc=0
-pool-misses=2837
-cur-stdalloc=14
-max-stdalloc=119
-
-```
-The value of cur-stdalloc remained 14 before and after the test, indicating that the fix indeed does what it's supposed to do.
-
-###How to debug hangs because of frame-loss?
-`https://bugzilla.redhat.com/show_bug.cgi?id=994959` is one of the bugs where statedump was helpful in finding where the frame was lost. Here is the process that was used to find where the hang is using statedump.
-When the hang was observed, statedumps were taken for all the processes. The mount's statedump showed the following stack:
-```
-[global.callpool.stack.1.frame.1]
-ref_count=1
-translator=fuse
-complete=0
-
-[global.callpool.stack.1.frame.2]
-ref_count=0
-translator=r2-client-1
-complete=1 <<----- Client xlator completed the readdirp call and unwound to afr
-parent=r2-replicate-0
-wind_from=afr_do_readdir
-wind_to=children[call_child]->fops->readdirp
-unwind_from=client3_3_readdirp_cbk
-unwind_to=afr_readdirp_cbk
-
-[global.callpool.stack.1.frame.3]
-ref_count=0
-translator=r2-replicate-0
-complete=0 <<---- Afr xlator is not unwinding for some reason.
-parent=r2-dht
-wind_from=dht_do_readdir
-wind_to=xvol->fops->readdirp
-unwind_to=dht_readdirp_cbk
-
-[global.callpool.stack.1.frame.4]
-ref_count=1
-translator=r2-dht
-complete=0
-parent=r2-io-cache
-wind_from=ioc_readdirp
-wind_to=FIRST_CHILD(this)->fops->readdirp
-unwind_to=ioc_readdirp_cbk
-
-[global.callpool.stack.1.frame.5]
-ref_count=1
-translator=r2-io-cache
-complete=0
-parent=r2-quick-read
-wind_from=qr_readdirp
-wind_to=FIRST_CHILD (this)->fops->readdirp
-unwind_to=qr_readdirp_cbk
-
-```
-`unwind_to` shows that the call was unwound to `afr_readdirp_cbk` from the client xlator.
-Inspecting that function revealed that afr was not unwinding the stack when the fop failed.
-Check http://review.gluster.org/5531 for more info about patch/code changes.
diff --git a/doc/features/afr-arbiter-volumes.md b/doc/features/afr-arbiter-volumes.md
deleted file mode 100644
index 1348e5645b8..00000000000
--- a/doc/features/afr-arbiter-volumes.md
+++ /dev/null
@@ -1,53 +0,0 @@
-Usage guide: Replicate volumes with arbiter configuration
-==========================================================
-Arbiter volumes are replica 3 volumes where the 3rd brick of the replica is
-automatically configured as an arbiter node. What this means is that the 3rd
-brick will store only the file name and metadata, but does not contain any data.
-This configuration is helpful in avoiding split-brains while providing the same
-level of consistency as a normal replica 3 volume.
-
-The arbiter volume can be created with the following command:
-`gluster volume create <VOLNAME> replica 3 arbiter 1 host1:brick1 host2:brick2 host3:brick3`
-
-Note that the syntax is similar to creating a normal replica 3 volume with the
-exception of the `arbiter 1` keyword. As seen in the command above, the only
-permissible values for the replica count and arbiter count are 3 and 1
-respectively. Also, the 3rd brick is always chosen as the arbiter brick and it
-is not configurable to have any other brick as the arbiter.
-
-Client/ Mount behaviour:
-========================
-By default, client quorum (`cluster.quorum-type`) is set to `auto` for a replica
-3 volume when it is created; i.e. at least 2 bricks need to be up to satisfy
-quorum and to allow writes. This setting should not be changed for arbiter
-volumes either. Additionally, the arbiter volume has some additional checks to
-prevent files from ending up in split-brain:
-
-* Clients take full file locks when writing to a file as opposed to range locks
- in a normal replica 3 volume.
-
-* If 2 bricks are up and if one of them is the arbiter (i.e. the 3rd brick) *and*
- it blames the other up brick, then all FOPS will fail with ENOTCONN (Transport
-  endpoint is not connected). If the arbiter doesn't blame the other brick,
- FOPS will be allowed to proceed. 'Blaming' here is w.r.t the values of AFR
- changelog extended attributes.
-
-* If 2 bricks are up and the arbiter is down, then FOPS will be allowed.
-
-* In all cases, if there is only one source before the FOP is initiated and if
- the FOP fails on that source, the application will receive ENOTCONN.
-
-Note: It is possible to see if a replica 3 volume has arbiter configuration from
-the mount point. If
-`$mount_point/.meta/graphs/active/$V0-replicate-0/options/arbiter-count` exists
-and its value is 1, then it is an arbiter volume. Also the client volume graph
-will have arbiter-count as a xlator option for AFR translators.
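-
-For example, assuming a volume named `testvol` mounted at `/mnt/testvol`, the check described above could look like:
-```
-cat /mnt/testvol/.meta/graphs/active/testvol-replicate-0/options/arbiter-count
-# an output of 1 indicates an arbiter volume
-```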
-
-Self-heal daemon behaviour:
-===========================
-Since the arbiter brick does not store any data for the files, data-self-heal
-from the arbiter brick will not take place. For example if there are 2 source
-bricks B2 and B3 (B3 being arbiter brick) and B2 is down, then data-self-heal
-will *not* happen from B3 to sink brick B1, and will be pending until B2 comes
-up and heal can happen from it. Note that metadata and entry self-heals can
-still happen from B3 if it is one of the sources.
diff --git a/doc/features/afr-statistics.md b/doc/features/afr-statistics.md
deleted file mode 100644
index d0705845aa4..00000000000
--- a/doc/features/afr-statistics.md
+++ /dev/null
@@ -1,142 +0,0 @@
-##gluster volume heal <volume-name> statistics
-
-##Description
-In case of index self-heal, the self-heal daemon reads the entries from the
-/brick-path/.glusterfs/indices/xattrop/ directory on the local bricks and,
-based on the entries it has read, attempts self-heal.
-Executing this command lists the crawl statistics of the self-heal done for
-each brick.
-
-For each brick, it will list:
-1. Starting time of crawl done for that brick.
-2. Ending time of crawl done for that brick.
-3. No of entries for which self-heal is successfully attempted.
-4. No of split-brain entries found while self-healing.
-5. No of entries for which heal failed.
-
-
-
-Example:
-a) Create a gluster volume with replica count 2.
-b) Create 10 files.
-c) kill brick_1 of this replica.
-d) Overwrite all 10 files.
-e) Kill the other brick (brick_2), and bring back (brick_1) up.
-f) Overwrite all 10 files.
-
-Now we have 10 files which are in split-brain. The self-heal daemon will crawl
-both the bricks and will count 10 files from each brick.
-It will report 10 files under split-brain with respect to the given brick.
-
-Gathering crawl statistics on volume volume1 has been successful
-------------------------------------------------
-
-Crawl statistics for brick no 0
-Hostname of brick 192.168.122.1
-
-Starting time of crawl: Tue May 20 19:13:11 2014
-
-Ending time of crawl: Tue May 20 19:13:12 2014
-
-Type of crawl: INDEX
-No. of entries healed: 0
-No. of entries in split-brain: 10
-No. of heal failed entries: 0
-------------------------------------------------
-
-Crawl statistics for brick no 1
-Hostname of brick 192.168.122.1
-
-Starting time of crawl: Tue May 20 19:13:12 2014
-
-Ending time of crawl: Tue May 20 19:13:12 2014
-
-Type of crawl: INDEX
-No. of entries healed: 0
-No. of entries in split-brain: 10
-No. of heal failed entries: 0
-
-------------------------------------------------
-
-
-As the output shows, the self-heal daemon detects 10 files in split-brain with
-respect to the given brick.
-
-
-
-
-##gluster volume heal <volume-name> statistics heal-count
-It lists the number of entries present in
-/<brick>/.glusterfs/indices/xattrop from each brick.
-
-
-1. Create a replicate volume.
-2. Kill one brick of a replicate volume1.
-3. Create 10 files.
-4. Execute above command.
-
---------------------------------------------------------------------------------
-Gathering count of entries to be healed on volume volume1 has been successful
-
-Brick 192.168.122.1:/brick_1
-Number of entries: 10
-
-Brick 192.168.122.1:/brick_2
-No gathered input for this brick
--------------------------------------------------------------------------------
-
-
-
-
-
-
-##gluster volume heal <volume-name> statistics heal-count replica \
- ip_addr:/brick_location
-
-To list the number of entries to be healed from a particular replicate
-subvolume, mention any one child of that replicate subvolume in the command;
-this will list the entries for all the children of that replicate subvolume.
-
-Example: dht
- / \
- / \
- replica-1 replica-2
- / \ / \
- child-1 child-2 child-3 child-4
- /brick1 /brick2 /brick3 /brick4
-
-gluster volume heal <vol-name> statistics heal-count ip:/brick1
-will list count only for child-1 and child-2.
-
-gluster volume heal <vol-name> statistics heal-count ip:/brick3
-will list count only for child-3 and child-4.
-
-
-
-1. Create a volume the same as shown in the graph above.
-2. Kill Brick-2.
-3. Create some files.
-4. If we are interested in knowing the number of files to be healed from each
- brick of replica-1 only, mention any one child of that replica.
-
-gluster volume heal volume1 statistics heal-count replica 192.168.122.1:/brick2
-
-output:
--------
-Gathering count of entries to be healed per replica on volume volume1 has \
-been successful
-
-Brick 192.168.122.1:/brick_1
-Number of entries: 10 <--10 files
-
-Brick 192.168.122.1:/brick_2
-No gathered input for this brick <-Brick is down
-
-Brick 192.168.122.1:/brick_3
-No gathered input for this brick <--No result, as we are not
- interested.
-
-Brick 192.168.122.1:/brick_4
-No gathered input for this brick <--No result, as we are not
-                                    interested.
-
-
diff --git a/doc/features/afr-v1.md b/doc/features/afr-v1.md
deleted file mode 100644
index 0ab41a1ab4c..00000000000
--- a/doc/features/afr-v1.md
+++ /dev/null
@@ -1,340 +0,0 @@
-#Automatic File Replication
-Afr xlator in glusterfs is responsible for replicating the data across the bricks.
-
-###Responsibilities of AFR
-Its responsibilities include the following:
-
-1. Maintain replication consistency (i.e. data on both the bricks should be the same, even when there are operations happening on the same file/directory in parallel from multiple applications/mount points, as long as all the bricks in the replica set are up)
-
-2. Provide a way of recovering data in case of failures as long as there is
- at least one brick which has the correct data.
-
-3. Serve fresh data for read/stat/readdir etc
-
-###Transaction framework
-For 1 and 2 above, afr uses a transaction framework which consists of the following 5
-phases that happen on all the bricks in the replica set (the bricks which are in replication):
-
-####1. Lock Phase
-####2. Pre-op Phase
-####3. Op Phase
-####4. Post-op Phase
-####5. Unlock Phase
-
-*Op Phase* is the actual operation sent by application (`write/create/unlink` etc). For every operation which afr receives that modifies data it sends that same operation in parallel to all the bricks in its replica set. This is how it achieves replication.
-
-*Lock, Unlock Phases* take necessary locks so that *Op phase* can provide **replication consistency** in normal work flow.
-
-#####For example:
-If an application performs `touch a` and the other one does `mkdir a`, afr makes sure that either file with name `a` or directory with name `a` is created on both the bricks.
-
-*Pre-op, Post-op Phases* provide changelogging which enables afr to figure out which copy is fresh.
-Once afr knows how to figure out fresh copy in the replica set it can **recover data** from fresh copy to stale copy. Also it can **serve fresh** data for `read/stat/readdir` etc.
-
-##Internal Operations
-Brief introduction to internal operations in Glusterfs which make *Locking, Unlocking, Pre/Post ops* possible:
-
-###Internal Locking Operations
-Glusterfs has **locks** translator which provides the following internal locking operations called `inodelk`, `entrylk` which are used by afr to achieve synchronization of operations on files or directories that conflict with each other.
-
-`Inodelk` gives the facility for translators in Glusterfs to obtain range (denoted by tuple with **offset**, **length**) locks in a given domain for an inode.
-Full file lock is denoted by the tuple (offset: `0`, length: `0`) i.e. length `0` is considered as infinity.
-
-`Entrylk` enables translators of Glusterfs to obtain locks on `name` in a given domain for an inode, typically a directory.
-
-**Locks** translator provides both *blocking* and *nonblocking* variants of these operations.
-
-###Xattrop
-For pre/post ops posix translator provides an operation called xattrop.
-xattrop is a way of *incrementing*/*decrementing* a number present in the extended attribute of the inode *atomically*.
-
-##Transaction Types
-There are 3 types of transactions in AFR.
-1. Data transactions
- - Operations that add/modify/truncate the file contents.
- - `Write`/`Truncate`/`Ftruncate` etc
-
-2. Metadata transactions
- - Operations that modify the data kept in inode.
- - `Chmod`/`Chown` etc
-
-3. Entry transactions
- - Operations that add/remove/rename entries in a directory
- - `Touch`/`Mkdir`/`Mknod` etc
-
-###Data transactions:
-
-*write* (`offset`, `size`) - writes data from `offset` of `size`
-
-*ftruncate*/*truncate* (`offset`) - truncates data from `offset` till the end of file.
-
-Afr internal locking needs to make sure that two conflicting data operations happen in order, one after the other, so that they do not result in replication inconsistency. Afr data operations take inodelks in the same domain (let's call it the `data` domain).
-
-*Write* operation with offset `O` and size `S` takes an inode lock in data domain with range `(O, S)`.
-
-*Ftruncate*/*Truncate* operations with offset `O` take inode locks in `data` domain with range `(O, 0)`. Please note that size `0` means size infinity.
-
-These ranges make sure that overlapping write/truncate/ftruncate operations are done one after the other.
-
-Now that we know the ranges the operations take locks on, we will see how locking happens in afr.
-
-####Lock:
-Afr initially attempts **non-blocking** locks on **all** the bricks of the replica set in **parallel**. If all the locks are successful then it goes on to perform pre-op. But in case **non-blocking** locks **fail** because there is *at least one conflicting operation* which already has a **granted lock** then it **unlocks** the **non-blocking** locks it already acquired in this previous step and proceeds to perform **blocking** locks **one after the other** on each of the subvolumes in the order of subvolumes specified in the volfile.
-
-Chances of **conflicting operations** are **very low**, and time elapsed in the **non-blocking** locks phase is `Max(latencies of the bricks for responding to inodelk)`, whereas time elapsed in the **blocking locks** phase is `Sum(latencies of the bricks for responding to inodelk)`. That is why afr always tries non-blocking locks first and only then moves to blocking locks.
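-
-For illustration, assume a replica set of three bricks whose inodelk response latencies are 2ms, 3ms and 7ms:
-
-    Non-blocking phase:  Max(2ms, 3ms, 7ms) = 7ms
-    Blocking phase:      Sum(2ms, 3ms, 7ms) = 12ms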
-
-####Pre-op:
-Each file/dir in a brick maintains the changelog (roughly, a pending operation count) of itself and that of the files
-present in all the other bricks in its replica set, as seen by that brick.
-
-Let's consider an example replica volume with 2 bricks, brick-a and brick-b.
-All files in brick-a will have 2 entries:
-one for itself and the other for the file present in its replica set, i.e. brick-b.
-One can inspect changelogs using the getfattr command.
-
- # getfattr -d -e hex -m. brick-a/file.txt
- trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for itself (brick-a)
- trusted.afr.vol-client-1=0x000000000000000000000000 -->changelog for brick-b as seen by brick-a
-
-Likewise, all files in brick-b will have:
-
- # getfattr -d -e hex -m. brick-b/file.txt
- trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for brick-a as seen by brick-b
- trusted.afr.vol-client-1=0x000000000000000000000000 -->changelog for itself (brick-b)
-
-#####Interpreting Changelog Value:
-Each extended attribute has a value which is `24` hexadecimal digits, i.e. `12` bytes.
-The first `8` digits (`4` bytes) represent the changelog of `data`, the second `8` digits the changelog
-of `metadata`, and the last `8` digits the changelog of `directory entries`.
-
-Pictorially representing the same, we have:
-
- 0x 00000000 00000000 00000000
- | | |
- | | \_ changelog of directory entries
- | \_ changelog of metadata
- \ _ changelog of data
-
-Before write operation is performed on the brick, afr marks the file saying there is a pending operation.
-
-As part of this pre-op afr sends an xattrop operation with increment 1 for the data part to make the extended attributes the following:
-
- # getfattr -d -e hex -m. brick-a/file.txt
- trusted.afr.vol-client-0=0x000000010000000000000000 -->changelog for itself (brick-a)
- trusted.afr.vol-client-1=0x000000010000000000000000 -->changelog for brick-b as seen by brick-a
-
-Likewise, all files in brick-b will have:
-
- # getfattr -d -e hex -m. brick-b/file.txt
- trusted.afr.vol-client-0=0x000000010000000000000000 -->changelog for brick-a as seen by brick-b
- trusted.afr.vol-client-1=0x000000010000000000000000 -->changelog for itself (brick-b)
-
-As the operation is in progress on files on both the bricks all the extended attributes show the same value.
-
-####Op:
-Now it sends the actual write operation to both the bricks. Afr remembers whether the operation is successful or not on all the subvolumes.
-
-####Post-Op:
-If the operation succeeds on all the bricks then there are no pending operations on any of the bricks, so as part of the post-op afr sends an xattrop operation with increment -1 (i.e. decrement by 1) for the data part to bring the extended attributes back to all zeros again.
-
-In case there is a failure on brick-b, there is still a pending operation on brick-b whereas there are no pending operations on brick-a. So the xattrop operation for these two extended attributes now differs: for the extended attribute corresponding to brick-a, i.e. trusted.afr.vol-client-0, a decrement by 1 is sent, whereas for the extended attribute corresponding to brick-b an increment by 0 is sent to retain the pending operation count.
-
- # getfattr -d -e hex -m. brick-a/file.txt
- trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for itself (brick-a)
- trusted.afr.vol-client-1=0x000000010000000000000000 -->changelog for brick-b as seen by brick-a
-
- # getfattr -d -e hex -m. brick-b/file.txt
- trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for brick-a as seen by brick-b
- trusted.afr.vol-client-1=0x000000010000000000000000 -->changelog for itself (brick-b)
-
-####Unlock:
-Once the transaction is completed unlock is sent on all the bricks where lock is acquired.
-
-
-###Meta Data transactions:
-
-setattr, setxattr, removexattr
-All metadata operations take the same inode lock with the same range in the metadata domain.
-
-####Lock:
-Metadata locking also starts initially with non-blocking locks and then moves on to blocking locks on failures caused by conflicting operations.
-
-####Pre-op:
-Before the metadata operation is performed on the brick, afr marks the file saying there is a pending operation.
-As part of this pre-op afr sends an xattrop operation with increment 1 for the metadata part to make the extended attributes the following:
-
- # getfattr -d -e hex -m. brick-a/file.txt
- trusted.afr.vol-client-0=0x000000000000000100000000 -->changelog for itself (brick-a)
- trusted.afr.vol-client-1=0x000000000000000100000000 -->changelog for brick-b as seen by brick-a
-
-Likewise, all files in brick-b will have:
-
- # getfattr -d -e hex -m. brick-b/file.txt
- trusted.afr.vol-client-0=0x000000000000000100000000 -->changelog for brick-a as seen by brick-b
- trusted.afr.vol-client-1=0x000000000000000100000000 -->changelog for itself (brick-b)
-
-As the operation is in progress on files on both the bricks all the extended attributes show the same value.
-
-####Op:
-Now it sends the actual metadata operation to both the bricks. Afr remembers whether the operation is successful or not on all the subvolumes.
-
-####Post-Op:
-If the operation succeeds on all the bricks then there are no pending operations on any of the bricks, so as part of the post-op afr sends an xattrop operation with increment -1 (i.e. decrement by 1) for the metadata part to bring the extended attributes back to all zeros again.
-
-In case there is a failure on brick-b, there is still a pending operation on brick-b whereas there are no pending operations on brick-a. So the xattrop operation for these two extended attributes now differs: for the extended attribute corresponding to brick-a, i.e. trusted.afr.vol-client-0, a decrement by 1 is sent, whereas for the extended attribute corresponding to brick-b an increment by 0 is sent to retain the pending operation count.
-
- # getfattr -d -e hex -m. brick-a/file.txt
- trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for itself (brick-a)
- trusted.afr.vol-client-1=0x000000000000000100000000 -->changelog for brick-b as seen by brick-a
-
- # getfattr -d -e hex -m. brick-b/file.txt
- trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for brick-a as seen by brick-b
- trusted.afr.vol-client-1=0x000000000000000100000000 -->changelog for itself (brick-b)
-
-####Unlock:
-Once the transaction is completed unlock is sent on all the bricks where lock is acquired.
-
-
-###Entry transactions:
-
-create, mknod, mkdir, link, symlink, rename, unlink, rmdir
-Pre-op/Post-op (done using xattrop) always happens on the parent directory.
-
-Entry Locks taken by these entry operations:
-
-**Create** (file `dir/a`): Lock on name `a` in inode of `dir`
-
-**mknod** (file `dir/a`): Lock on name `a` in inode of `dir`
-
-**mkdir** (dir `dir/a`): Lock on name `a` in inode of `dir`
-
-**link** (file `oldfile`, file `dir/newfile`): Lock on name `newfile` in inode of `dir`
-
-**Symlink** (file `oldfile`, file `dir`/`symlinkfile`): Lock on name `symlinkfile` in inode of `dir`
-
-**rename** of (file `dir1`/`file1`, file `dir2`/`file2`): Lock on name `file1` in inode of `dir1`, Lock on name `file2` in inode of `dir2`
-
-**rename** of (dir `dir1`/`dir2`, dir `dir3`/`dir4`): Lock on name `dir2` in inode of `dir1`, Lock on name `dir4` in inode of `dir3`, Lock on `NULL` in inode of `dir4`
-
-**unlink** (file `dir`/`a`): Lock on name `a` in inode of `dir`
-
-**rmdir** (dir dir/a): Lock on name `a` in inode of `dir`, Lock on `NULL` in inode of `a`
-
-####Lock:
-Entry locking also starts initially with non-blocking locks and then moves on to blocking locks on failures caused by conflicting operations.
-
-####Pre-op:
-Before entry operation is performed on the brick, afr marks the directory saying there is a pending operation.
-
-As part of this pre-op afr sends xattrop operation with increment 1 for entry operation to make the extended attributes the following:
-
- # getfattr -d -e hex -m. brick-a/
- trusted.afr.vol-client-0=0x000000000000000000000001 -->changelog for itself (brick-a)
- trusted.afr.vol-client-1=0x000000000000000000000001 -->changelog for brick-b as seen by brick-a
-
-Likewise, the directory on brick-b will have:
-
- # getfattr -d -e hex -m. brick-b/
- trusted.afr.vol-client-0=0x000000000000000000000001 -->changelog for brick-a as seen by brick-b
- trusted.afr.vol-client-1=0x000000000000000000000001 -->changelog for itself (brick-b)
-
-As the operation is in progress on files on both the bricks all the extended attributes show the same value.
-
-####Op:
-Now it sends the actual entry operation to both the bricks. Afr remembers whether the operation is successful or not on all the subvolumes.
-
-####Post-Op:
-If the operation succeeds on all the bricks then there are no pending operations on any of the bricks, so as part of the post-op afr sends an xattrop operation with increment -1 (i.e. decrement by 1) for the entry part to bring the extended attributes back to all zeros again.
-
-In case there is a failure on brick-b, there is still a pending operation on brick-b whereas there are no pending operations on brick-a. So the xattrop operation for these two extended attributes now differs: for the extended attribute corresponding to brick-a, i.e. trusted.afr.vol-client-0, a decrement by 1 is sent, whereas for the extended attribute corresponding to brick-b an increment by 0 is sent to retain the pending operation count.
-
-    # getfattr -d -e hex -m. brick-a/
-    trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for itself (brick-a)
-    trusted.afr.vol-client-1=0x000000000000000000000001 -->changelog for brick-b as seen by brick-a
-
-    # getfattr -d -e hex -m. brick-b/
-    trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for brick-a as seen by brick-b
-    trusted.afr.vol-client-1=0x000000000000000000000001 -->changelog for itself (brick-b)
-
-####Unlock:
-Once the transaction is completed unlock is sent on all the bricks where lock is acquired.
-
-The parts above cover how replication consistency is achieved in afr.
-
-Now let us look at how afr figures out how to recover from failures, given the changelog extended attributes.
-
-###Recovering from failures (Self-heal)
-For recovering from failures afr tries to determine which copy is the fresh copy based on the extended attributes.
-
-There are 3 possibilities:
-1. All the extended attributes are zero on all the bricks. This means there are no pending operations on any of the bricks so there is nothing to recover.
-2. According to the extended attributes there is a brick(brick-a) which noticed that there are operations pending on the other brick(brick-b).
- - There are 4 possibilities for brick-b
-
- - It did not even participate in transaction (all extended attributes on brick-b are zeros). Choose brick-a as source and perform recovery to brick-b.
-
- - It participated in the transaction but died even before post-op. (All extended attributes on brick-b have a pending-count). Choose brick-a as source and perform recovery to brick-b.
-
- - It participated in the transaction and after the post-op extended attributes on brick-b show that there are pending operations on itself. Choose brick-a as source and perform recovery to brick-b.
-
- - It participated in the transaction and after the post-op extended attributes on brick-b show that there are pending operations on brick-a. This situation is called Split-brain and there is no way to recover. This situation can happen in cases of network partition.
-
-3. The only possibility now is where both brick-a and brick-b have pending operations. In this case the changelog extended attributes are all non-zero on all the bricks. Basically what could have happened is that the operations started on the file but either the whole replica set went down or the mount process itself died before the post-op was performed. In this case there is a possibility that the data on the bricks is different. In this case afr chooses the file with the bigger size as source; if both files have the same size then it chooses the subvolume which has witnessed the larger number of pending operations on the other brick as source. If both have the same number of pending operations then it chooses the file with the newest ctime as source. If this is also the same then it just picks one of the two bricks as source and syncs data on to the other to make sure that the files are replicas of each other.
-
-###Self-healing:
-Afr does 3 types of self-heals for data recovery.
-
-1. Data self-heal
-
-2. Metadata self-heal
-
-3. Entry self-heal
-
-As we have seen earlier, afr depends on changelog extended attributes to figure out which copy is the source and which copy is the sink. The general algorithm for performing this recovery (self-heal) is the same for all of these different self-heals.
-
-1. Take appropriate full locks on the file/directory to make sure no other transaction is in progress while inspecting changelog extended attributes.
-In this step, for
- - Data self-heal afr takes inode lock with `offset: 0` and `size: 0`(infinity) in data domain.
- - Entry self-heal takes entry lock on directory with `NULL` name i.e. full directory lock.
- - Metadata self-heal it takes pre-defined range in metadata domain on which all the metadata operations on that inode take locks on. To prevent duplicate data self-heal an inode lock is taken in self-heal domain as well.
-
-2. Perform Sync from fresh copy to stale copy.
-In this step,
- - Metadata self-heal gets the inode attributes, extended attributes from source copy and sets them on the stale copy.
-
- - Entry self-heal reads entries on stale directories and see if they are present on source directory, if they are not present it deletes them. Then it reads entries on fresh directory and creates the missing entries on stale directories.
-
-   - Data self-heal does things a bit differently to make sure no other writes on the file are blocked for the duration of self-heal, because file sizes could be as big as 100G (VM files) and we don't want to block all the transactions until the self-heal is over. The locks translator allows two overlapping locks to be granted if they are from the same lock owner. Using this, data self-heal takes a small 128k range lock, heals just that 128k chunk, then takes the lock on the next 128k chunk, unlocks the previous lock, and moves on to the next one. It always makes sure that at least one self-heal lock is present on the file throughout the duration of self-heal so that two self-heals don't happen in parallel.
-
-   - Data self-heal has two algorithms: one where a chunk is copied only when there is a data mismatch for that chunk, called 'diff' self-heal, and the other which is a blind copy of each chunk, called 'full' self-heal.
-
-3. Change extended attributes to mark new sources after the sync.
-
-4. Unlock the locks acquired to perform self-heal.
-
-### Transaction Optimizations:
-As we saw earlier afr transaction for all the operations that modify data happens in 5 phases, i.e. it sends 5 operations on the network for every operation. In the following sections we will see optimizations already implemented in afr which reduce the number of operations on the network to just 1 per transaction in best case.
-
-####Changelog-piggybacking
-This optimization comes into picture when on same file descriptor, before write1's post op is complete write2's pre-op starts and the operations are succeeding. When writes come in that manner we can piggyback on the pre-op of write1 for write2 and somehow tell write1 that write2 will do the post-op that was supposed to be done by write1. So write1's post-op does not happen over network, write2's pre-op does not happen over network. This optimization does not hold if there are any failures in write1's phases.
-
-####Delayed Post-op
-This optimization just delays the post-op of the write transaction (write1) by a pre-configured amount of time to increase the probability of the next write piggybacking on the pre-op done by write1.
-
-With the combination of these two optimizations for operations like full file copy which are write intensive operations, what will essentially happen is for the first write a pre-op will happen. Then for the last write on the file post-op happens. So for all the write transactions between first write and last write afr reduced network operations from 5 to 3.
-
-####Eager-locking:
-This optimization comes into the picture when only one file descriptor is open on the file and writes are being performed just like in the previous optimization. What this optimization does is take a full file lock on the file, irrespective of the offset and size of the write, so that the lock acquired by write1 can be piggybacked by write2, and write2 takes the responsibility of unlocking it. Both write1 and write2 will have the same lock owner, and afr takes the responsibility of serializing overlapping writes so that replication consistency is maintained.
-
-With the combination of these optimizations for operations like full file copy which are write intensive operations, what will essentially happen is for the first write a pre-op, full-file lock will happen. Then for the last write on the file post-op, unlock happens. So for all the write transactions between first write and last write afr reduced network operations from 5 to 1.
-
-###Quorum in afr:
-To avoid split-brains, afr employs the following quorum policies.
- - In a replica set with an odd number of bricks, the replica set is said to be in quorum if more than half of the bricks are up.
- - In a replica set with an even number of bricks, if more than half of the bricks are up then it is said to be in quorum; but if the number of bricks that are up is equal to the number of bricks that are down, then it is said to be in quorum only if the first brick is also among the bricks that are up.
-
-When quorum is not met in the replica set then modify operations on the mount are not allowed by afr.
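-
-For illustration, with assumed brick sets:
-
-    replica 3 (b1,b2,b3): b1,b2 up, b3 down   -> quorum met (more than half up)
-    replica 2 (b1,b2):    b1 up, b2 down      -> quorum met (first brick is up)
-    replica 2 (b1,b2):    b2 up, b1 down      -> quorum not met, modifications disallowed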
-
-###Self-heal daemon and Index translator usage by afr:
-
-####Index xlator:
-On each brick the index xlator is loaded. This xlator keeps track of what is happening in afr's pre-op and post-op. If there is an ongoing I/O or a pending self-heal, the changelog xattrs will have non-zero values. Whenever an xattrop/fxattrop fop (pre-op and post-op are done using these fops) comes to the index xlator, a link (named with the gfid of the file on which the fop is performed) is added in the <brick>/.glusterfs/indices/xattrop directory. If the value returned by the fop is zero the link is removed from the index, otherwise it is kept until zero is returned in a subsequent xattrop/fxattrop fop.
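-
-As an illustration (the brick path is only an example), the entries currently tracked by the index xlator can be listed straight from the brick:
-
-    # each entry, named by gfid, marks a file with a pending changelog/heal state
-    ls /data/brick01a/homegfs/.glusterfs/indices/xattrop/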
-
-####Self-heal-daemon:
-The self-heal-daemon process keeps running on each machine of the trusted storage pool. This process has the afr xlators of all the volumes which are started. Its job is to crawl the indices on the bricks that are local to that machine; if any of the files represented by the gfid in the link name need healing, it automatically heals them. This operation is performed every 10 minutes for each replica set. Additionally, this operation is also performed when a brick comes online.
diff --git a/doc/features/bit-rot/00-INDEX b/doc/features/bit-rot/00-INDEX
deleted file mode 100644
index d351a1976ff..00000000000
--- a/doc/features/bit-rot/00-INDEX
+++ /dev/null
@@ -1,8 +0,0 @@
-00-INDEX
- - this file
-bitrot-docs.txt
- - links to design, spec and feature page
-object-versioning.txt
- - object versioning mechanism to track object signature
-memory-usage.txt
- - memory usage during object expiry tracking
diff --git a/doc/features/bit-rot/bitrot-docs.txt b/doc/features/bit-rot/bitrot-docs.txt
deleted file mode 100644
index 39cd491dbcd..00000000000
--- a/doc/features/bit-rot/bitrot-docs.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-* Feature page: http://www.gluster.org/community/documentation/index.php/Features/BitRot
-
-* Design: http://goo.gl/Mjy4mD
-
-* CLI specification: http://goo.gl/2o12Fn
diff --git a/doc/features/bit-rot/memory-usage.txt b/doc/features/bit-rot/memory-usage.txt
deleted file mode 100644
index 5fe06d4a209..00000000000
--- a/doc/features/bit-rot/memory-usage.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-object expiry tracking memory usage
-====================================
-
-Bitrot daemon tracks objects for expiry in a data structure known
-as "timer-wheel" (after which the object is signed). It's a well
-known data structure for tracking million of objects of expiry.
-Let's see the memory usage involved when tracking 1 million
-objects (per brick).
-
-Bitrot daemon uses "br_object" structure to hold information
-needed for signing. An instance of this structure is allocated
-for each object that needs to be signed.
-
-struct br_object {
- xlator_t *this;
-
- br_child_t *child;
-
- void *data;
- uuid_t gfid;
- unsigned long signedversion;
-
- struct list_head list;
-};
-
-Timer-wheel requires an instance of the structure below per
-object that needs to be tracked for expiry.
-
-struct gf_tw_timer_list {
- void *data;
- unsigned long expires;
-
- /** callback routine */
- void (*function)(struct gf_tw_timer_list *, void *, unsigned long);
-
- struct list_head entry;
-};
-
-Structure sizes:
- sizeof (struct br_object): 64 bytes
- sizeof (struct gf_tw_timer_list): 40 bytes
-
-Together, these structures take up 104 bytes. To track all 1 million objects
-at the same time, the amount of memory taken up would be:
-
- 1,000,000 * 104 bytes: ~100MB
-
-Not so bad, I think.
diff --git a/doc/features/bit-rot/object-versioning.txt b/doc/features/bit-rot/object-versioning.txt
deleted file mode 100644
index def901f0fc5..00000000000
--- a/doc/features/bit-rot/object-versioning.txt
+++ /dev/null
@@ -1,236 +0,0 @@
-Object versioning
-=================
-
- Bitrot detection in GlusterFS relies on object (file) checksum (hash) verification,
- also known as "object signature". An object is signed when there are no active
- file descriptors referring to its inode (i.e., upon last close()). This is just a
- hint for the initiation of hash calculation (and therefore signing). There is
- absolutely no control over when clients can initiate modification operations on
- the object. An object could be under modification while its hash computation is
- in progress. It would also be inappropriate to restrict access to such objects
- for the duration of signing.
-
- Object versioning is used as a mechanism to identify the staleness of an object's
- signature. The document below does not just describe the version update protocol,
- but also goes through the various factors that led to its design.
-
-NOTE: The word "object" is used to represent a "regular file" (in the Linux sense) and
- object versions are persisted in extended attributes of the object's inode.
- Signature calculation includes object's data (no metadata as of now).
-
-INDEX
-=====
-  i. Version update protocol
- ii. Correctness guarantees
- iii. Implementation
- iv. Protocol enhancements
-
-i. Version update protocol
-============================
- There are two types of versions associated with an object:
-
- a) Ongoing version: This version is incremented on the first open() [when
-    the in-memory representation of the object (inode) is marked dirty]
-    and synchronized to disk. When an object is created, a default ongoing
-    version of one (1) is assigned. An object lookup() too assigns the
-    default version if not present. When a version is initialized upon
-    a lookup() or creat() FOP, it need not be durable on disk and therefore
-    can just be an extended attribute set without an expensive fsync()
-    syscall.
-
- b) Signing version: This is the version against which an object is deemed
-    to be signed. An object's signature is tied to a particular signed version.
-    Since an object is a candidate for signing upon the last release() [last
-    close()], the signing version is the "ongoing version" at that point in time.
-
- An object's signature is trustable when the version it was signed against
- matches the ongoing version, i.e., if the hash is calculated by hand and
- compared against the object signature, it *should* be a perfect match if
- and only if the versions are equal. On the other hand, if the versions differ,
- the signature is considered stale (it might or might not match the hash just
- calculated).
-
- Initialization of object versions
- ---------------------------------
- An object that existed before versioning was introduced is assigned the
- default versions upon lookup(). The protocol at this point expects "no"
- durability guarantees for the versions, i.e., extended attribute sets
- need not be followed by an explicit filesystem sync (fsync()). In case
- of a power outage or a crash, versions are re-initialized with defaults
- if found to be non-existent. The signing version is initialized with a
- default value of zero (0) and the ongoing version as one (1).
-
- [
- NOTE: If an object already has versions on-disk, lookup() just brings
- the versions in memory. In this case both versions may or may
- not match depending on state the object was left in.
- ]
-
-
- Increment of object versions
- ----------------------------
- During initial versioning, the in-memory representation of the object is
- marked dirty, so that the next modification operation on the object
- triggers a version synchronization to disk (extended attribute set).
- Moreover, this operation needs to be durable on disk for the protocol
- to be crash consistent.
-
- Let's picture the various version states after subsequent open()s.
- Not all modification operations need to increment the ongoing version;
- only the first operation needs to (subsequent operations are NO-OPs).
-
- NOTE: From here on, "[s]" depicts a durable filesystem operation and
-       "*" depicts the inode as dirty.
-
-
- lookup() open() open() open()
- ===========================================================
-
- OV(m): 1* 2 2 2
- -----------------------------------------
- OV(d): 1 2[s] 2 2
- SV(d): 0 0 0 0
-
-
- Let's now picture the state when an already signed object undergoes
- file operations.
-
- on-disk state:
- OV(d): 3
- SV(d): 3|<signature>
-
-
- lookup() open() open() open()
- ===========================================================
-
- OV(m): 3* 4 4 4
- -----------------------------------------
- OV(d): 3 4[s] 4 4
- SV(d): 3 3 3 3
-
- Signing process
- ---------------
- As per the above example, when the last open file descriptor is closed,
- signing needs to be performed. The protocol requires that the signing
- be attached to a version, which in this case is the in-memory
- value of the ongoing version. A release() also marks the inode dirty;
- therefore, the next open() does a durable version synchronization to
- disk.
-
- [carrying forward the versions from the earlier example]
-
- close() release() open() open()
- ===========================================================
-
- OV(m): 4 4* 5 5
- -----------------------------------------
- OV(d): 4 4 5[s] 5
- SV(d): 3 3 3 3
-
- As shown above, a release() call triggers signing with the signing version
- as OV(m), which in this case is 4. During signing, the object is signed
- with a signature attached to version 4 as shown below (continuing with
- the last open() call from above):
-
- open() sign(4, signature)
- ===========================================================
-
- OV(m): 5 5
- -----------------------------------------
- OV(d): 5 5
- SV(d): 3 4:<signature>[s]
-
- A signature comparison at this point in time is untrustworthy due to
- the version mismatch. This also protects against node crashes and hard
- reboots, thanks to the durability guarantee of the on-disk version on
- first open().
-
- close() release() open()
- ===========================================================
-
- OV(m): 4 4* 5
- -------------------------------- CRASH
- OV(d): 4 4 5[s]
- SV(d): 3 3 3
-
- The protocol is immune to stale signing requests after crashes due to
- the version synchronization performed on first open(). A signing
- request for a version lower than the *current* ongoing version
- can be ignored. It's left to the implementation to either
- accept or ignore such signing request(s).
-
- [
-   NOTE: Inode forget() causes a fresh lookup() to be triggered.
- Since a forget() call is received when there are no
- active references for an inode, the on-disk version is
- the latest and would be copied in-memory on lookup().
- ]
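-
- To make the tables above concrete, here is a small toy model of the version
- states (a sketch only; the real implementation lives in the bit-rot stub
- xlator). It tracks OV(m), OV(d), SV(d) and the dirty flag, persists OV+1 on
- the first open() after a dirty mark, and attaches signing to the in-memory
- version captured at the last release().
-
-    #include <stdio.h>
-
-    struct obj {
-        unsigned long ov_mem, ov_disk, sv_disk;  /* OV(m), OV(d), SV(d) */
-        int dirty;                               /* inode marked dirty  */
-        int fd_count;                            /* open fds            */
-    };
-
-    static void lookup(struct obj *o)
-    {
-        o->ov_mem = o->ov_disk = 1;              /* default OV */
-        o->sv_disk = 0;                          /* default SV */
-        o->dirty = 1;
-    }
-
-    static void obj_open(struct obj *o)
-    {
-        if (o->dirty) {
-            o->ov_disk = o->ov_mem + 1;          /* durable write ([s]) */
-            o->ov_mem = o->ov_disk;
-            o->dirty = 0;
-        }
-        o->fd_count++;
-    }
-
-    static void obj_release(struct obj *o, unsigned long *sign_version)
-    {
-        if (--o->fd_count == 0) {
-            *sign_version = o->ov_mem;           /* sign against OV(m)  */
-            o->dirty = 1;                        /* next open() resyncs */
-        }
-    }
-
-    int main(void)
-    {
-        struct obj o = {0};
-        unsigned long v = 0;
-
-        lookup(&o);                              /* OV(m)=1*, OV(d)=1, SV(d)=0 */
-        obj_open(&o);                            /* OV(m)=2,  OV(d)=2[s]       */
-        obj_release(&o, &v);                     /* signing requested for v=2  */
-        o.sv_disk = v;                           /* SV(d)=2:<signature>        */
-        printf("OV(m)=%lu OV(d)=%lu SV(d)=%lu trustable=%d\n",
-               o.ov_mem, o.ov_disk, o.sv_disk, o.sv_disk == o.ov_mem);
-        return 0;
-    }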
-
-ii. Correctness Guarantees
-==========================
-
- Concurrent open()'s
- -------------------
- When an inode is dirty (i.e., the very next operation will try to
- synchronize the version to disk), there can be multiple calls [say,
- open()] that find the inode state as dirty and try to write back
- the new version to disk. Also, note that marking the inode as synced
- and updating the in-memory version is done *after* the new version
- is written on disk. This is done to avoid an incorrect version being
- stored on-disk in case the version synchronization fails (with the
- in-memory version still holding the updated value).
- Coming back to multiple open() calls on an object, each open() call
- tries to synchronize the new version to disk if the inode is marked
- as dirty. This is safe as each open() would try to synchronize the
- new version (ongoing version + 1) even if the update is concurrent.
- The in-memory version is finally updated to reflect the new
- version and the inode is marked non-dirty. Again, this is done *only* if
- the inode is dirty, so open() calls which updated the on-disk
- version but lost the race to update the in-memory version become
- NO-OPs.
-
- on-disk state:
- OV(d): 3
- SV(d): 3|<signature>
-
-
- lookup() open() open()' open()' open()
- =============================================================
-
- OV(m): 3* 3* 3* 4 NO-OP
- --------------------------------------------------
- OV(d): 3 4[s] 4[s] 4 4
- SV(d): 3 3 3 3 3
-
-
- open()/release() race
- ---------------------
- This race can cause a release() [on last close()] to pick up the
- ongoing version which was just incremented on a fresh open(). This
- leads to signing of the object with the same version as the
- ongoing version, thereby mismatching signatures when calculated.
- Another point worth mentioning here is that the open
- file descriptor is *attached* to its inode *after* the
- version synchronization (and increment) is done. Hence, if a release()
- sneaks into this window, the file descriptor list for the given
- inode is still empty, so the release() considers itself the
- last close().
- To counter this, the protocol should track the open and release
- counts for file descriptors. A release() should only trigger a
- signing request when the file descriptor list for an inode is empty
- and the number of releases matches the number of opens. When an
- open() sneaks in and increments the ongoing version but its file
- descriptor is not yet attached to the inode, the open and release
- counts mismatch, identifying an open() in progress.
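-
- The counting rule itself fits in a few lines (again just a sketch of the
- rule, not the stub xlator's code): signing is requested only when the fd
- list is empty and the release count has caught up with the open count; a
- racing open() that has bumped the version but not yet attached its fd
- leaves the counts unequal, so the release() backs off.
-
-    #include <stdbool.h>
-    #include <stdio.h>
-
-    struct fd_track {
-        unsigned long opens;      /* opens that bumped the ongoing version */
-        unsigned long releases;   /* releases seen so far                  */
-        unsigned long fds_listed; /* fds currently attached to the inode   */
-    };
-
-    /* Should this release() trigger a signing request? */
-    static bool should_sign(const struct fd_track *t)
-    {
-        return t->fds_listed == 0 && t->opens == t->releases;
-    }
-
-    int main(void)
-    {
-        /* A racing open() bumped the version but hasn't attached its fd yet. */
-        struct fd_track racing = { .opens = 2, .releases = 1, .fds_listed = 0 };
-        struct fd_track quiet  = { .opens = 2, .releases = 2, .fds_listed = 0 };
-
-        printf("racing: sign=%d quiet: sign=%d\n",
-               should_sign(&racing), should_sign(&quiet));
-        return 0;
-    }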
-
-
-iii. Implementation
-===================
- Refer to: xlators/feature/bit-rot/src/stub
-
-iv. Protocol enhancements
-=========================
-
- a) Delaying persisting on-disk versions till open()
- b) Lazy version update (until signing?)
- c) Protocol changes required to handle anonymous file
- descriptors in GlusterFS.
diff --git a/doc/features/brick-failure-detection.md b/doc/features/brick-failure-detection.md
deleted file mode 100644
index 24f2a18f39f..00000000000
--- a/doc/features/brick-failure-detection.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# Brick Failure Detection
-
-This feature attempts to identify storage/file system failures and disable the failed brick without disrupting the remainder of the node's operation.
-
-## Description
-
-Detecting failures on the filesystem that a brick uses makes it possible to handle errors that are caused outside of the Gluster environment.
-
-There have been hanging brick processes when the underlying storage of a brick became unavailable. A hanging brick process can still use the network and respond to clients, but actual I/O to the storage is impossible and can cause noticeable delays on the client side.
-
-The goal is to provide better detection of storage subsystem failures and to prevent brick processes from hanging when the storage hardware or the filesystem fails.
-
-A health-checker (thread) has been added to the posix xlator. This thread periodically checks the status of the filesystem (which implies checking that the storage hardware is functional).
-
-`glusterd` can detect that the brick process has exited, and `gluster volume status` will show that the brick process is not running anymore. System administrators checking the logs should be able to triage the cause.
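-
-Conceptually the health-checker is just a loop that periodically exercises the brick's backing filesystem and gives up when that fails. The sketch below is illustrative only (hypothetical path and interval, not the actual `posix_health_check_thread_proc`): it stats a directory under the brick every interval and terminates the process when the check errors out.
-
-```c
-#include <signal.h>
-#include <stdio.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#define BRICK_DIR "/bricks/b1"  /* hypothetical brick export directory     */
-#define INTERVAL  30            /* storage.health-check-interval (seconds) */
-
-int main(void)
-{
-    struct stat st;
-
-    for (;;) {
-        sleep(INTERVAL);
-        /* A failing stat() on the brick directory is treated as a
-         * storage/filesystem failure: log it and take the brick down. */
-        if (stat(BRICK_DIR, &st) != 0) {
-            fprintf(stderr, "health-check failed, going down\n");
-            raise(SIGTERM);
-            return 1;
-        }
-    }
-}
-```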
-
-## Usage and Configuration
-
-The health-checker is enabled by default and runs a check every 30 seconds. This interval can be changed per volume with:
-
- # gluster volume set <VOLNAME> storage.health-check-interval <SECONDS>
-
-If `SECONDS` is set to 0, the health-checker will be disabled.
-
-## Failure Detection
-
-Errors are logged to the standard syslog (mostly `/var/log/messages`):
-
- Jun 24 11:31:49 vm130-32 kernel: XFS (dm-2): metadata I/O error: block 0x0 ("xfs_buf_iodone_callbacks") error 5 buf count 512
- Jun 24 11:31:49 vm130-32 kernel: XFS (dm-2): I/O Error Detected. Shutting down filesystem
- Jun 24 11:31:49 vm130-32 kernel: XFS (dm-2): Please umount the filesystem and rectify the problem(s)
- Jun 24 11:31:49 vm130-32 kernel: VFS:Filesystem freeze failed
- Jun 24 11:31:50 vm130-32 GlusterFS[1969]: [2013-06-24 10:31:50.500674] M [posix-helpers.c:1114:posix_health_check_thread_proc] 0-failing_xfs-posix: health-check failed, going down
- Jun 24 11:32:09 vm130-32 kernel: XFS (dm-2): xfs_log_force: error 5 returned.
- Jun 24 11:32:20 vm130-32 GlusterFS[1969]: [2013-06-24 10:32:20.508690] M [posix-helpers.c:1119:posix_health_check_thread_proc] 0-failing_xfs-posix: still alive! -> SIGTERM
-
-The messages labelled with `GlusterFS` in the above output are also written to the logs of the brick process.
-
-## Recovery after a failure
-
-When a brick process detects that the underlying storage is not responding anymore, the process will exit. There is no automated way for the brick process to get restarted; the sysadmin will need to fix the problem with the storage first.
-
-After correcting the storage (hardware or filesystem) issue, the following command will start the brick process again:
-
- # gluster volume start <VOLNAME> force
-
-## How To Test
-
-The health-checker thread that is part of each brick process will get started automatically when a volume has been started. Verifying its functionality can be done in different ways.
-
-On virtual hardware:
-
-* disconnect the disk from the VM that holds the brick
-
-On real hardware:
-
-* simulate a RAID-card failure by unplugging the card or cables
-
-On a system that uses LVM for the bricks:
-
-* use device-mapper to load an error-table for the disk, see [this description](http://review.gluster.org/5176).
-
-On any system (writing to random offsets of the block device, more difficult to trigger):
-
-1. cause corruption on the filesystem that holds the brick
-2. read contents from the brick, hoping to hit the corrupted area
-3. the filesystem should abort after hitting a bad spot, and the health-checker should notice that shortly afterwards
diff --git a/doc/features/dht.md b/doc/features/dht.md
deleted file mode 100644
index c35dd6d0c27..00000000000
--- a/doc/features/dht.md
+++ /dev/null
@@ -1,223 +0,0 @@
-# How GlusterFS Distribution Works
-
-The defining feature of any scale-out system is its ability to distribute work
-or data among many servers. Accordingly, people in the distributed-system
-community have developed many powerful techniques to perform such distribution,
-but those techniques often remain little known or understood even among other
-members of the file system and database communities that would benefit from
-them. This confusion is reflected even in the name of the GlusterFS component
-that performs distribution - DHT, which stands for Distributed Hash Table but is
-not actually a DHT as that term is most commonly used or defined. The way
-GlusterFS's DHT works is based on a few basic principles:
-
- * All operations are driven by clients, which are all equal. There are no
- special nodes with special knowledge of where files are or should be.
-
- * Directories exist on all subvolumes (bricks or lower-level aggregations of
- bricks); files exist on only one.
-
- * Files are assigned to subvolumes based on *consistent hashing*, and even
- more specifically a form of consistent hashing exemplified by Amazon's
- [Dynamo][dynamo].
-
-The result of all this is that users are presented with a set of files that is
-the union of the files present on all subvolumes. The following sections
-describe how this "uniting" process actually works.
-
-## Layouts
-
-The conceptual basis of Dynamo-style consistent hashing is of numbers around a
-circle, like a clock. First, the circle is divided into segments and those
-segments are assigned to bricks. (For the sake of simplicity we'll use
-"bricks" hereafter even though they might actually be replicated/striped
-subvolumes.) Several factors guide this assignment.
-
- * Assignments are done separately for each directory.
-
- * Historically, segments have all been the same size. However, this can lead
- to smaller bricks becoming full while plenty of space remains on larger
- ones. If the *cluster.weighted-rebalance* option is set, segments sizes
-   ones. If the *cluster.weighted-rebalance* option is set, segment sizes
-
- * Assignments need not include all bricks in the volume. If the
- *cluster.subvols-per-directory* option is set, only that many bricks will
- receive assignments for that directory.
-
-However these assignments are done, they collectively become what we call a
-*layout* for a directory. This layout is then stored using extended
-attributes, with each brick's copy of that extended attribute on that directory
-consisting of four 32-bit fields.
-
- * A version, which might be DHT\_HASH\_TYPE\_DM to represent an assignment as
- described above, or DHT\_HASH\_TYPE\_DM\_USER to represent an assignment made
- manually by the user (or external script).
-
- * A "commit hash" which will be described later.
-
- * The first number in the assigned range (segment).
-
- * The last number in the assigned range.
-
-For example, the extended attributes representing a weighted assignment between
-three bricks, one twice as big as the others, might look like this.
-
- * Brick A (the large one): DHT\_HASH\_TYPE\_DM 1234 0 0x7ffffff
-
- * Brick B: DHT\_HASH\_TYPE\_DM 1234 0x80000000 0xbfffffff
-
- * Brick C: DHT\_HASH\_TYPE\_DM 1234 0xc0000000 0xffffffff
-
-## Placing Files
-
-To place a file in a directory, we first need a layout for that directory - as
-described above. Next, we calculate a hash for the file. To minimize
-collisions either between files in the same directory with different names or
-between files in different directories with the same name, this hash is
-generated using both the (containing) directory's unique GFID and the file's
-name. This hash is then matched to one of the layout assignments, to yield
-what we call a *hashed location*. For example, consider the layout shown
-above. The hash 0xabad1dea is between 0x80000000 and 0xbfffffff, so the
-corresponding file's hashed location would be on Brick B. A second file with a
-hash of 0xfaceb00c would be assigned to Brick C by the same reasoning.
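-
-A toy version of the placement step might look like the sketch below (illustrative only; the actual hash function, computed from the directory's GFID and the file name, is abstracted away here). The layout is the list of (start, stop, brick) ranges described above, and placing a file is just finding the range its 32-bit hash falls into.
-
-```c
-#include <stdint.h>
-#include <stdio.h>
-
-/* One assignment from a directory layout: [start, stop] -> brick. */
-struct dht_range {
-    uint32_t start, stop;
-    const char *brick;
-};
-
-/* The weighted three-brick example layout from above. */
-static const struct dht_range layout[] = {
-    { 0x00000000u, 0x7fffffffu, "Brick A" },
-    { 0x80000000u, 0xbfffffffu, "Brick B" },
-    { 0xc0000000u, 0xffffffffu, "Brick C" },
-};
-
-static const char *hashed_location(uint32_t hash)
-{
-    for (size_t i = 0; i < sizeof(layout) / sizeof(layout[0]); i++)
-        if (hash >= layout[i].start && hash <= layout[i].stop)
-            return layout[i].brick;
-    return NULL; /* hole in the layout: fall back to looking everywhere */
-}
-
-int main(void)
-{
-    printf("0xabad1dea -> %s\n", hashed_location(0xabad1deau)); /* Brick B */
-    printf("0xfaceb00c -> %s\n", hashed_location(0xfaceb00cu)); /* Brick C */
-    return 0;
-}
-```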
-
-## Looking Up Files
-
-Because layout assignments might change, especially as bricks are added or
-removed, finding a file involves more than calculating its hashed location and
-looking there. That is in fact the first step, and works most of the time -
-i.e. the file is found where we expected it to be - but there are a few more
-steps when that's not the case. Historically, the next step has been to look
-for the file **everywhere** - i.e. to broadcast our lookup request to all
-subvolumes. If the file isn't found that way, it doesn't exist. At this
-point, an open that requires the file's presence will fail, or a create/mkdir
-that requires its absence will be allowed to continue.
-
-Regardless of whether a file is found at its hashed location or elsewhere, we
-now know its *cached location*. As the name implies, this is stored within DHT
-to satisfy future lookups. If it's not the same as the hashed location, we
-also take an extra step. This step is the creation of a *linkfile*, which is a
-special stub left at the **hashed** location pointing to the **cached**
-location. Therefore, if a client naively looks for a file at its hashed
-location and finds a linkfile instead, it can use that linkfile to look up the
-file where it really is instead of needing to inquire everywhere.
-
-## Rebalancing
-
-As bricks are added or removed, or files are renamed, many files can end up
-somewhere other than at their hashed locations. When this happens, the volumes
-need to be rebalanced. This process consists of two parts.
-
- 1. Calculate new layouts, according to the current set of bricks (and possibly
- their characteristics). We call this the "fix-layout" phase.
-
- 2. Migrate any "misplaced" files to their correct (hashed) locations, and
- clean up any linkfiles which are no longer necessary. We call this the
- "migrate-data" phase.
-
-Usually, these two phases are done together. (In fact, the code for them is
-somewhat intermingled.) However, the migrate-data phase can involve a lot of
-I/O and be very disruptive, so users can do just the fix-layout phase and defer
-migrate-data until a more convenient time. This allows new files to be placed
-on new bricks, even though old files might still be in the "wrong" place.
-
-When calculating a new layout to replace an old one, DHT specifically tries to
-maximize overlap of the assigned ranges, thus minimizing data movement. This
-difference can be very large. For example, consider the case where our example
-layout from earlier is updated to add a new double-sided brick. Here's a very
-inefficient way to do that.
-
- * Brick A (the large one): 0x00000000 to 0x55555555
-
- * Brick B: 0x55555556 to 0x7fffffff
-
- * Brick C: 0x80000000 to 0xaaaaaaaa
-
- * Brick D (the new one): 0xaaaaaaab to 0xffffffff
-
-This would cause files in the following ranges to be migrated:
-
- * 0x55555556 to 0x7fffffff (from A to B)
-
- * 0x80000000 to 0xaaaaaaaa (from B to C)
-
- * 0xaaaaaaab to 0xbfffffff (from B to D)
-
- * 0xc0000000 to 0xffffffff (from C to D)
-
-As an historical note, this is exactly what we used to do, and in this case it
-would have meant moving 7/12 of all files in the volume. Now let's consider a
-new layout that's optimized to maximize overlap with the old one.
-
- * Brick A: 0x00000000 to 0x55555555
-
- * Brick D: 0x55555556 to 0xaaaaaaaa <- optimized insertion point
-
- * Brick B: 0xaaaaaaab to 0xd5555554
-
- * Brick C: 0xd5555555 to 0xffffffff
-
-In this case we only need to move 5/12 of all files. In a volume with millions
-or even billions of files, reducing data movement by 1/6 of all files is a
-pretty big improvement. In the future, DHT might use "virtual node IDs" or
-multiple hash rings to make rebalancing even more efficient.
-
-## Rename Optimizations
-
-With the file-lookup mechanisms we already have in place, it's not necessary to
-move a file from one brick to another when it's renamed - even across
-directories. It will still be found, albeit a little less efficiently. The
-first client to look for it after the rename will add a linkfile, which every
-other client will follow from then on. Also, every client that has found the
-file once will continue to find it based on its cached location, without any
-network traffic at all. Because the extra lookup cost is small, and the
-movement cost might be very large, DHT renames the file "in place" on its
-current brick instead (taking advantage of the fact that directories exist
-everywhere).
-
-This optimization is further extended to handle cases where renames are very
-common. For example, rsync and similar tools often use a "write new then
-rename" idiom in which a file "xxx" is actually written as ".xxx.1234" and then
-moved into place only after its contents have been fully written. To make this
-process more efficient, DHT uses a regular expression to separate the permanent
-part of a file's name (in this case "xxx") from what is likely to be a
-temporary part (the leading "." and trailing ".1234"). That way, after the
-file is renamed it will be in its correct hashed location - which it wouldn't
-be otherwise if "xxx" and ".xxx.1234" hash differently - and no linkfiles or
-broadcast lookups will be necessary.
-
-In fact, there are two regular expressions available for this purpose -
-*cluster.rsync-hash-regex* and *cluster.extra-hash-regex*. As its name
-implies, *rsync-hash-regex* defaults to the pattern that rsync uses, while
-*extra-hash-regex* can be set by the user to support a second tool using the
-same temporary-file idiom.
-
-## Commit Hashes
-
-A very recent addition to DHT's algorithmic arsenal is intended to reduce the
-number of "broadcast" lookups the it issues. If a volume is completely in
-balance, then no file could exist anywhere but at its hashed location.
-Therefore, if we've already looked there and not found it, then looking
-elsewhere would be pointless (and wasteful). The *commit hash* mechanism is
-used to detect this case. A commit hash is assigned to a volume, and
-separately to each directory, and then updated according to the following
-rules.
-
- * The volume commit hash is changed whenever actions are taken that might
- cause layout assignments across all directories to become invalid - i.e.
- bricks being added, removed, or replaced.
-
- * The directory commit hash is changed whenever actions are taken that might
- cause files to be "misplaced" - e.g. when they're renamed.
-
- * The directory commit hash is set to the volume commit hash when the
- directory is created, and whenever the directory is fully rebalanced so that
- all files are at their hashed locations.
-
-In other words, whenever either the volume or directory commit hash is changed,
-that creates a mismatch. In that case we revert to the "pessimistic"
-broadcast-lookup method described earlier. However, if the two hashes match,
-then we can skip the broadcast lookup and return a result immediately.
-This has been observed to cause a 3x performance improvement in workloads that
-involve creating many small files across many bricks.
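-
-As a rough illustration (not DHT's actual code), the decision reduces to a comparison of the two hashes:
-
-```c
-#include <stdbool.h>
-#include <stdint.h>
-
-/* Illustrative only: after a miss at the hashed location, do we still
- * need to broadcast the lookup to all subvolumes? */
-bool need_broadcast_lookup(uint32_t volume_commit_hash,
-                           uint32_t dir_commit_hash)
-{
-    /* Hashes match: the directory is fully balanced, so a file missing
-     * from its hashed location cannot exist anywhere else. */
-    return dir_commit_hash != volume_commit_hash;
-}
-```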
-
-[dynamo]: http://www.allthingsdistributed.com/files/amazon-dynamo-sosp2007.pdf
diff --git a/doc/features/file-snapshot.md b/doc/features/file-snapshot.md
deleted file mode 100644
index 7f7c419fc7f..00000000000
--- a/doc/features/file-snapshot.md
+++ /dev/null
@@ -1,91 +0,0 @@
-#File Snapshot
-This feature gives the ability to take snapshots of files.
-
-##Description
-This feature adds file snapshotting support to glusterfs. Snapshots can be created, deleted and reverted.
-
-To take a snapshot of a file, the file should be in QCOW2 format, as the code for the block layer snapshot has been taken from Qemu and put into gluster as a translator.
-
-With this feature, glusterfs will have better integration with OpenStack Cinder and, in general, the ability to take snapshots of files (typically VM images).
-
-A new extended attribute (xattr) will be added to identify files which are 'snapshot managed' vs raw files.
-
-##Volume Options
-The following volume option needs to be set on the volume for taking file snapshots.
-
- # features.file-snapshot on
-##CLI parameters
-The following CLI parameters need to be passed to the setfattr command to create, delete and revert file snapshots.
-
- # trusted.glusterfs.block-format
- # trusted.glusterfs.block-snapshot-create
- # trusted.glusterfs.block-snapshot-goto
-##Fully loaded Example
-Download the glusterfs 3.5 rpms from download.gluster.org
-and install them.
-
-start glusterd by using the command
-
- # service glusterd start
-Now create a volume by using the command
-
- # gluster volume create <vol_name> <brick_path>
-Run the command below to make sure that volume is created.
-
- # gluster volume info
-Now turn on the snapshot feature on the volume by using the command
-
- # gluster volume set <vol_name> features.file-snapshot on
-Verify that the option is set by using the command
-
- # gluster volume info
-User should be able to see another option in the volume info
-
- # features.file-snapshot: on
-Now mount the volume using fuse mount
-
- # mount -t glusterfs <vol_name> <mount point>
-cd into the mount point
- # cd <mount_point>
- # touch <file_name>
-The size of the file can be set and its format changed to QCOW2 by running the command below. The file size can be in KB/MB/GB.
-
- # setfattr -n trusted.glusterfs.block-format -v qcow2:<file_size> <file_name>
-Now create another file and send data to that file by running the command
-
- # echo 'ABCDEFGHIJ' > <data_file1>
-Copy the data from one file to the other by running the command
-
- # dd if=<data_file1> of=<file_name> conv=notrunc
-Now take the `snapshot of the file` by running the command
-
- # setfattr -n trusted.glusterfs.block-snapshot-create -v <image1> <file_name>
-Add some more contents to the file and take another file snapshot by doing the following steps
-
- # echo '1234567890' > <data_file2>
- # dd if=<data_file2> of=<file_name> conv=notrunc
- # setfattr -n trusted.glusterfs.block-snapshot-create -v <image2> <file_name>
-Now `revert` both the file snapshots and write data to some files so that data can be compared.
-
- # setfattr -n trusted.glusterfs.block-snapshot-goto -v <image1> <file_name>
- # dd if=<file_name> of=<out-file1> bs=11 count=1
- # setfattr -n trusted.glusterfs.block-snapshot-goto -v <image2> <file_name>
- # dd if=<file_name> of=<out-file2> bs=11 count=1
-Now read the contents of the files and compare as below:
-
- # cat <data_file1>, <out_file1> and compare contents.
- # cat <data_file2>, <out_file2> and compare contents.
-##One line description for the variables used
-file_name = File which will be created in the mount point initially.
-
-data_file1 = File which contains data 'ABCDEFGHIJ'
-
-image1 = First file snapshot which has 'ABCDEFGHIJ' + some null values.
-
-data_file2 = File which contains data '1234567890'
-
-image2 = second file snapshot which has '1234567890' + some null values.
-
-out_file1 = After reverting image1 this contains 'ABCDEFGHIJ'
-
-out_file2 = After reverting image2 this contains '1234567890'
diff --git a/doc/features/geo-replication/distributed-geo-rep.md b/doc/features/geo-replication/distributed-geo-rep.md
deleted file mode 100644
index 0a3183d6269..00000000000
--- a/doc/features/geo-replication/distributed-geo-rep.md
+++ /dev/null
@@ -1,71 +0,0 @@
-Introduction
-============
-
-This document goes through the new design of distributed geo-replication, its features and the nature of the changes involved. First, we list some of the important features.
-
- - Distributed asynchronous replication
- - Fast and versatile change detection
- - Replica failover
- - Hardlink synchronization
- - Effective handling of deletes and renames
- - Configurable sync engine (rsync, tar+ssh)
- - Adaptive to a wide variety of workloads
- - GFID synchronization
-
-Geo-replication makes use of the all new *journaling* infrastructure (a.k.a. changelog) to achieve great performance and feature improvements as mentioned above. To understand more about changelogging and the helper library (*libgfchangelog*) refer to document: doc/features/geo-replication/libgfchangelog.md
-
-Data Replication
-----------------
-
-Geo-replication is responsible for incrementally replicating data from the master node to the slave. But isn't that similar to what AFR does? Yes, but here the slave is located geographically distant from the master. Geo-replication follows the eventually consistent replication model, which implies that, at any point in time, the slave may be lagging w.r.t. the master, but will eventually catch up. Replication performance is dependent on two crucial factors:
- - Network latency
- - Change detection
-
-Network latency is something that is not in direct control for many reasons, but still there is always a best effort. Therefore, geo-replication offloads the data replication part to common UNIX file transfer utilities. We choose the grand daddy of file transfers [rsync(1)] [1] as the default synchronization engine, as it's best known for its diff transfer algorithm for efficient usage of network and lightning fast transfers (leave alone the flexibility). But what about small-file performance? Due to its checksumming algorithm, rsync has more overhead for small files -- the overhead of checksumming outweighs the bytes to be transferred for small files. Therefore, geo-replication can also use a combination of tar piped over ssh to transfer a large number of small files. Tests have shown a great improvement over standard rsync. However, the sync engine is not yet dynamic to the file type and needs to be chosen manually by a configuration option.
-
-OTOH, change detection is something that is in full control of the application. Earlier (< release 3.5), geo-replication would perform a file system crawl to identify changes in the file system. This was not an unintelligent *check-every-single-inode* crawl of the file system, but crawl logic based on *xtime*. xtime is an extended attribute maintained by the *marker* translator for each inode on the master and follows an upward-recursive marking pattern. Geo-replication would traverse a directory based on this simple condition:
-
-> xtime(master) > xtime(slave)
-
-E.g.:
-
-> MASTER SLAVE
->
-> /\ /\
-> d0 dir0 d0 dir0
-> / \ / \
-> d1 dir1 d1 dir1
-> / /
-> d2 d2
-> / /
-> file0 file0
-
-Consider the directory tree above. Assume that master and slave were in sync and the following operation happens on master:
-```
-touch /d0/d1/d2/file0
-```
-This would trigger an xtime marking (xtime being the current timestamp) from the leaf (*file0*) up to the root (*/*), i.e. an *xattr* of *file0*, *d2*, *d1*, *d0* and finally */*. The geo-replication daemon would crawl the file system based on the condition mentioned before and hence would only crawl the **left** part of the directory tree (as the **right** part would have equal xtimes).
-
-Although the above crawling algorithm is fast, it still has to crawl a good part of the file system. Also, to decide whether to crawl a particular subdirectory, geo-rep needs to compare xtimes -- which is basically a **getxattr()** call on the master and slave (remember, the *slave* is over a WAN).
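-
-As a rough sketch (toy code, not gsyncd's crawler; the xtimes here are plain fields on an in-memory tree rather than real extended attributes), the crawl decision looks like this: a subtree is descended only when the master's xtime is newer than the slave's, which is what prunes the right half of the tree in the example above.
-
-```c
-#include <stdio.h>
-
-struct node {
-    const char *name;
-    long xtime_m, xtime_s;       /* xtime on master / slave   */
-    struct node *child, *next;   /* first child, next sibling */
-};
-
-static void crawl(struct node *n, int depth)
-{
-    /* The crawl condition: descend only if xtime(master) > xtime(slave). */
-    if (n->xtime_m <= n->xtime_s)
-        return;
-
-    printf("%*ssync %s\n", depth * 2, "", n->name);
-    for (struct node *c = n->child; c; c = c->next)
-        crawl(c, depth + 1);
-}
-
-int main(void)
-{
-    /* touch /d0/d1/d2/file0 bumped the xtime along the left branch only. */
-    struct node file0 = { "file0", 2, 1, NULL,   NULL  };
-    struct node d2    = { "d2",    2, 1, &file0, NULL  };
-    struct node dir1  = { "dir1",  1, 1, NULL,   NULL  };
-    struct node d1    = { "d1",    2, 1, &d2,    &dir1 };
-    struct node dir0  = { "dir0",  1, 1, NULL,   NULL  };
-    struct node d0    = { "d0",    2, 1, &d1,    &dir0 };
-    struct node root  = { "/",     2, 1, &d0,    NULL  };
-
-    crawl(&root, 0);
-    return 0;
-}
-```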
-
-Therefore, in 3.5 the need arose to take crawling to the next level. Geo-replication now uses the changelogging infrastructure to identify changes in the filesystem. Actually, there is absolutely no crawl involved. Changelog-based detection is notification based. The geo-replication daemon registers itself with the changelog consumer library (*libgfchangelog*) and basically invokes a set of APIs to get the list of changes in the filesystem and replays them onto the slave. No crawl, nor any kind of extended attribute comparison, is involved.
-
-Distributed Geo-Replication
----------------------------
-Geo-replication (also known as gsyncd or geo-rep) used to be non-distributed before release 3.5. The node on which the geo-rep start command was executed was responsible for replicating data to the slave. If this node went offline for some reason (reboot, crash, etc.), replication would cease. So one of the main development efforts for release 3.5 was to *distributify* geo-replication. A geo-rep daemon running on each node (per brick) is responsible for replicating data **local** to each brick. This results in full parallelism and effective use of cluster/network resources.
-
-With release 3.5, the geo-rep start command spawns a geo-replication daemon on each node in the master cluster (one per brick). The geo-rep *status* command shows the geo-rep session status from each master node. Similarly, *stop* gracefully tears down the session on all nodes.
-
-What else is synced?
---------------------
- - GFID: Synchronizing the inode number (GFID) between master and the slave helps in synchronizing hardlinks.
- - Purges are also handled effectively as there is no entry comparison between master and slave. With changelog replay, geo-rep performs unlink operations without having to resort to an expensive **readdir()** over the WAN.
- - Renames: With earlier geo-replication, because of the path based nature of crawling, renames were actually a delete and a create on the slave, followed by data transfer (not to mention the inode number change). Now, with changelogging, it's actually a **rename()** call on the slave.
-
-Replica Failover
-----------------
-One of the basic volume configurations is a replicated volume (synchronous replication). Having geo-replication sync data from all replicas would mean wastage of network bandwidth and possibly data corruption on the slave (though that's unlikely). Therefore, geo-rep on such volume configurations works in an **ACTIVE** and **PASSIVE** mode. The geo-rep daemon on one of the replicas is responsible for replicating data (**ACTIVE**), while the other geo-rep daemon is basically doing nothing (**PASSIVE**).
-
-In the event of the *ACTIVE* node going offline, the *PASSIVE* node identifies this event (there's a lag of at most 60 seconds for this identification) and switches to *ACTIVE*, thereby taking over the role of replicating data from where the earlier *ACTIVE* node left off. This guarantees uninterrupted data replication even on node reboots/failures.
-
-[1]:http://rsync.samba.org
diff --git a/doc/features/geo-replication/libgfchangelog.md b/doc/features/geo-replication/libgfchangelog.md
deleted file mode 100644
index 1dd0d24253a..00000000000
--- a/doc/features/geo-replication/libgfchangelog.md
+++ /dev/null
@@ -1,119 +0,0 @@
-libgfchangelog: "GlusterFS changelog" consumer library
-======================================================
-
-This document puts forward the intended need for a GlusterFS changelog consumer library (a.k.a. libgfchangelog) for consuming changelogs produced by the Changelog translator. Further, it mentions the proposed design and the API exposed by it. A brief explanation of the changelog translator can also be found as a commit message in the upstream source tree and the review link can be [accessed here] [1].
-
-The initial consumer of changelogs would be Geo-Replication (release 3.5). Possible consumers in the future could be backup utilities, GlusterFS self-heal, bit-rot detection, AV scanners. All these utilities have one thing in common - to get a list of changed entities (created/modified/deleted) in the file system. Therefore, the need arises to provide such functionality in the form of a shared library that applications can link against and query for changes (See API section). There is no plan as of now to provide language bindings as such, but for shell-script friendliness a 'gfind' command line utility (which would be dynamically linked with libgfchangelog) would be helpful. As of now, development of this utility has not commenced.
-
-The next section gives a brief introduction about how changelogs are organized and managed. Then we propose a couple of designs for libgfchangelog. The API set is not covered in this document (maybe later).
-
-Changelogs
-==========
-
-Changelogs can be thought of as a running history for an entity in the file system from the time the entity came into existence. The goal is to capture all possible transitions the entity underwent till the time it got purged. The transition namespace is broken up into three categories with each category represented by a specific changelog format. Changes are recorded in a flat file in the filesystem and are rolled over after a specific time interval. All three categories are recorded in a single changelog file (sequentially) with a type for each entry. Having a single file reduces disk seeks and fragmentation, and means fewer files to deal with. The strategy for pruning old logs is still undecided.
-
-
-Changelog Transition Namespace
-------------------------------
-
-As mentioned before the transition namespace is categorized into three types:
- - TYPE-I : Data operation
- - TYPE-II : Metadata operation
- - TYPE-III : Entry operation
-
-One could visualize the transition of a file system entity as a state machine transitioning from one type to another. For TYPE-I and TYPE-II operations there is no state transition as such, but a TYPE-III operation involves a state change from the file system's perspective. We can now classify file operations (fops) into one of the three types:
- - Data operation: write(), writev(), truncate(), ftruncate()
- - Metadata operation: setattr(), fsetattr(), setxattr(), fsetxattr(), removexattr(), fremovexattr()
- - Entry operation: create(), mkdir(), mknod(), symlink(), link(), rename(), unlink(), rmdir()
-
-Changelog Entry Format
-----------------------
-
-In order to record the type of operation an entity underwent, a type identifier is used. Normally, the entity on which the operation is performed would be identified by the pathname, which is the most common way of addressing in a file system, but we choose to use the GlusterFS internal file identifier (GFID) instead (as GlusterFS supports GFID based backends and the pathname field may not always be valid, among other reasons which are out of the scope of this document). Therefore, the format of the record for the three types of operation can be summarized as follows:
-
- - TYPE-I : GFID of the file
- - TYPE-II : GFID of the file
- - TYPE-III : GFID + FOP + MODE + UID + GID + PARGFID/BNAME [PARGFID/BNAME]
-
-GFIDs are analogous to inodes. TYPE-I and TYPE-II fops record the GFID of the entity on which the operation was performed: thereby recording that there was a data/metadata change on the inode. TYPE-III fops record at the minimum a set of six or seven records (depending on the type of operation), which is sufficient to identify what type of operation the entity underwent. Normally this record includes the GFID of the entity, the type of file operation (which is an integer [an enumerated value which is used in GlusterFS]) and the parent GFID and the basename (analogous to parent inode and basename).
-
-Changelogs can be either in ASCII or binary format, the difference being the format of the records that is persisted. In a binary changelog the gfids are recorded in their native format, i.e., a 16-byte record, and the fop number as a 4-byte integer. In an ASCII changelog, the gfids are stored in their canonical form and the fop number is stringified and persisted. A null character is used as the record separator. This makes it hard to read changelogs from the command line, but the packed format is needed to support file names with spaces and special characters. Below is a snippet of a changelog alongside its hexdump.
-
-```
-00000000 47 6c 75 73 74 65 72 46 53 20 43 68 61 6e 67 65 |GlusterFS Change|
-00000010 6c 6f 67 20 7c 20 76 65 72 73 69 6f 6e 3a 20 76 |log | version: v|
-00000020 31 2e 31 20 7c 20 65 6e 63 6f 64 69 6e 67 20 3a |1.1 | encoding :|
-00000030 20 32 0a 45 61 36 39 33 63 30 34 65 2d 61 66 39 | 2.Ea693c04e-af9|
-00000040 65 2d 34 62 61 35 2d 39 63 61 37 2d 31 63 34 61 |e-4ba5-9ca7-1c4a|
-00000050 34 37 30 31 30 64 36 32 00 32 33 00 33 33 32 36 |47010d62.23.3326|
-00000060 31 00 30 00 30 00 66 36 35 34 32 33 32 65 2d 61 |1.0.0.f654232e-a|
-00000070 34 32 62 2d 34 31 62 33 2d 62 35 61 61 2d 38 30 |42b-41b3-b5aa-80|
-00000080 33 62 33 64 61 34 35 39 33 37 2f 6c 69 62 76 69 |3b3da45937/libvi|
-00000090 72 74 5f 64 72 69 76 65 72 5f 6e 65 74 77 6f 72 |rt_driver_networ|
-000000a0 6b 2e 73 6f 00 44 61 36 39 33 63 30 34 65 2d 61 |k.so.Da693c04e-a|
-000000b0 66 39 65 2d 34 62 61 35 2d 39 63 61 37 2d 31 63 |f9e-4ba5-9ca7-1c|
-000000c0 34 61 34 37 30 31 30 64 36 32 00 45 36 65 39 37 |4a47010d62.E6e97|
-```
-
-As you can see, there is an *entry* operation (journal record starting with an "E"). Records for this operation are:
- - GFID : a693c04e-af9e-4ba5-9ca7-1c4a47010d62
- - FOP : 23 (create)
- - Mode : 33261
- - UID : 0
- - GID : 0
- - PARGFID/BNAME: f654232e-a42b-41b3-b5aa-803b3da45937
-
-**NOTE**: In case of a rename operation, there would be an additional record (for the target PARGFID/BNAME).
-
-libgfchangelog
---------------
-
-NOTE: changelogs generated by the changelog translator are rolled over [with the timestamp as the suffix] after a specific interval, after which a new changelog is started. The current changelog [changelog file without the timestamp as the suffix] should never be processed unless it has been rolled over. The rolled-over logs should be treated as read-only.
-
-Capturing changes performed on a file system is useful for applications that otherwise rely on a file system scan (crawl) to figure out such information. Backup utilities, automatic file healing in a replicated environment, bit-rot detection and the like are some of the end-user applications that require a set of changed entities in a file system to act on. The goal of libgfchangelog is to provide the application (consumer) a fast and easy to use common query interface (API). The consumer need not worry about the changelog format, the nomenclature of the changelog files, etc.
-
-Now we list functionality and some of the features.
-
-Functionality
--------------
-
-Changelog Processing: Processing involves reading changelog file(s) and converting the entries into a human-readable (or application-understandable) format (in the case of the binary log format).
-Book-keeping: Keeping track of how much of the changelog the application has consumed (i.e., changes during the time slice start-time -> end-time).
-Serve API requests: Update the consumer by providing the set of changes.
-
-Processing could be done in two ways:
-
-* Pre-processing (pre-processing from the library POV):
-Once a changelog file is rolled over (by the changelog translator), a set of post-processing operations are performed. These operations could include converting a binary log file to an understandable format, collating a bunch of logs into a larger sampling period, or just keeping a private copy of the changelog (in ASCII format). Extra disk space is consumed to store this private copy. The library would then be free to consume these logs and serve application requests.
-
-* On-demand:
-The processing of the changelogs is triggered when an application requests changes. The downside of this is the additional time spent on decoding the logs and accumulating data at application request time (but no additional disk space is used over the time period).
-
-After processing, the changelog is ready to be consumed by the application. The function of processing is to convert the logs into human/application readable format (an example is shown below):
-
-```
-E a7264fe2-dd6b-43e1-8786-a03b42cc2489 CREATE 33188 0 0 00000000-0000-0000-0000-000000000001%2Fservices1
-M a7264fe2-dd6b-43e1-8786-a03b42cc2489 NULL
-M 00000000-0000-0000-0000-000000000001 NULL
-D a7264fe2-dd6b-43e1-8786-a03b42cc2489
-```
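-
-For illustration, consuming the human-readable form shown above could be as simple as dispatching on the first character of each record (a hypothetical consumer-side sketch; the field layout follows the example, with %2F standing for an escaped '/'):
-
-```c
-#include <stdio.h>
-
-/* Parse one line of the processed (human-readable) changelog shown above. */
-static void handle_record(const char *line)
-{
-    char gfid[64] = "", rest[512] = "";
-
-    switch (line[0]) {
-    case 'E':   /* entry op: E <gfid> <FOP> <mode> <uid> <gid> <pargfid%2Fbname> */
-        sscanf(line, "E %63s %511[^\n]", gfid, rest);
-        printf("entry change on %s (%s)\n", gfid, rest);
-        break;
-    case 'M':   /* metadata op: M <gfid> ... */
-        sscanf(line, "M %63s", gfid);
-        printf("metadata change on %s\n", gfid);
-        break;
-    case 'D':   /* data op: D <gfid> */
-        sscanf(line, "D %63s", gfid);
-        printf("data change on %s\n", gfid);
-        break;
-    }
-}
-
-int main(void)
-{
-    handle_record("E a7264fe2-dd6b-43e1-8786-a03b42cc2489 CREATE 33188 0 0 "
-                  "00000000-0000-0000-0000-000000000001%2Fservices1");
-    handle_record("M a7264fe2-dd6b-43e1-8786-a03b42cc2489 NULL");
-    handle_record("D a7264fe2-dd6b-43e1-8786-a03b42cc2489");
-    return 0;
-}
-```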
-
-Features
---------
-
-The following points mention some of the features that the library could provide.
-
- - Consumer could choose the update type when it registers with the library. 'types' could be:
- - Streaming: The consumer is updated via stream of changes, ie. the library would just replay the logs
- - Consolidated: The consumer is provided with a consolidated view of the changelog, e.g. if <gfid> had a DATA and a METADATA operation, it would be presented as a single update. Similarly for ENTRY operations.
- - Raw: This mode provides the consumer with the pathnames of the changelog files themselves (after processing). The changelogs should be strictly treated as read-only. This gives the consumer the flexibility to extract updates in their own preferred way (e.g. using command line tools like sed, awk, sort | uniq etc.).
- - Application may choose to adopt a synchronous (blocking) or an asynchronous (callback) notification mechanism.
- - Provide a unified view of changelogs from multiple peers (replication scenario) or a global changelog view of the entire cluster.
-
-
-**The first cut of the library supports:**
- - Raw access mode
- - Synchronous programming model
- - Per brick changelog consumption ie. no unified/globally aggregated changelog
-
-[1]:http://review.gluster.org/5127
diff --git a/doc/features/gfid-access.md b/doc/features/gfid-access.md
deleted file mode 100644
index 2d324a18bdb..00000000000
--- a/doc/features/gfid-access.md
+++ /dev/null
@@ -1,73 +0,0 @@
-#Gfid-access Translator
-The 'gfid-access' translator provides access to data in glusterfs using a
-virtual path. This particular translator is designed to provide direct access to
-files in glusterfs using their gfid. A 'GFID' is glusterfs's inode number for a file,
-identifying it uniquely. As of now, Geo-replication is the only consumer of this
-translator. The changelog translator logs the 'gfid' with the corresponding file
-operation in journals, which are consumed by Geo-Replication to replicate the
-files very efficiently using the gfid-access translator.
-
-###Implications and Usage
-A new virtual directory called '.gfid' is exposed in the aux-gfid mount
-point when a gluster volume is mounted with the 'aux-gfid-mount' option.
-All the gfids of files are exposed at one level under the '.gfid' directory.
-No matter at what level the file resides, it is accessed using its
-gfid under this virtual directory as shown in the example below. All access
-protocols work seamlessly, as the complexities are handled internally.
-
-###Testing
-1. Mount glusterfs client with '-o aux-gfid-mount' as follows.
-
- mount -t glusterfs -o aux-gfid-mount <node-ip>:<volname> <mountpoint>
-
- Example:
-
- #mount -t glusterfs -o aux-gfid-mount rhs1:master /master-aux-mnt
-
-2. Get the 'gfid' of a file using normal mount or aux-gfid-mount and do some
- operations as follows.
-
- getfattr -n glusterfs.gfid.string <file>
-
- Example:
-
- #getfattr -n glusterfs.gfid.string /master-aux-mnt/file
- # file: file
- glusterfs.gfid.string="796d3170-0910-4853-9ff3-3ee6b1132080"
-
- #cat /master-aux-mnt/file
- sample data
-
- #stat /master-aux-mnt/file
- File: `file'
- Size: 12 Blocks: 1 IO Block: 131072 regular file
- Device: 13h/19d Inode: 11525625031905452160 Links: 1
- Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
- Access: 2014-05-23 20:43:33.239999863 +0530
- Modify: 2014-05-23 17:36:48.224999989 +0530
- Change: 2014-05-23 20:44:10.081999938 +0530
-
-
-3. Access files using virtual path as follows.
-
- /mountpoint/.gfid/<actual-canonical-gfid-of-the-file\>'
-
- Example:
-
- #cat /master-aux-mnt/.gfid/796d3170-0910-4853-9ff3-3ee6b1132080
- sample data
- #stat /master-aux-mnt/.gfid/796d3170-0910-4853-9ff3-3ee6b1132080
- File: `.gfid/796d3170-0910-4853-9ff3-3ee6b1132080'
- Size: 12 Blocks: 1 IO Block: 131072 regular file
- Device: 13h/19d Inode: 11525625031905452160 Links: 1
- Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
- Access: 2014-05-23 20:43:33.239999863 +0530
- Modify: 2014-05-23 17:36:48.224999989 +0530
- Change: 2014-05-23 20:44:10.081999938 +0530
-
- We can see that the 'cat' command on the file using its path and using the
- virtual path displays the same data. Similarly, the 'stat' command on the file
- using its path and using the virtual gfid path gives the same inode number,
- confirming that it is the same file.
-
-###Nature of changes
-This feature is introduced with 'gfid-access' translator.
diff --git a/doc/features/glusterfs_nfs-ganesha_integration.md b/doc/features/glusterfs_nfs-ganesha_integration.md
deleted file mode 100644
index b30671506d7..00000000000
--- a/doc/features/glusterfs_nfs-ganesha_integration.md
+++ /dev/null
@@ -1,123 +0,0 @@
-# GlusterFS and NFS-Ganesha integration
-
-Nfs-ganesha can support NFS (v3, 4.0, 4.1 pNFS) and 9P (from the Plan9 operating system) protocols concurrently. It provides a FUSE-compatible File System Abstraction Layer (FSAL) to allow file-system developers to plug in their own storage mechanism and access it from any NFS client.
-
-With NFS-GANESHA, the NFS client talks to the NFS-GANESHA server instead, which is in the user address space already. NFS-GANESHA can access the FUSE filesystems directly through its FSAL without copying any data to or from the kernel, thus potentially improving response times. Of course the network streams themselves (TCP/UDP) will still be handled by the Linux kernel when using NFS-GANESHA.
-
-GlusterFS has also been integrated with NFS-Ganesha in the recent past to export the volumes created via glusterfs, using "libgfapi". libgfapi is a new userspace library developed to access data in glusterfs. It performs I/O on gluster volumes directly, without a FUSE mount. It is a filesystem-like API which runs/sits in the application process context (which is NFS-Ganesha here) and eliminates the use of FUSE and the kernel VFS layer from the glusterfs volume access. Thus, by integrating NFS-Ganesha and libgfapi, the speed and latency have been improved compared to FUSE mount access.
-
-### 1.) Pre-requisites
-
- - Before starting to setup NFS-Ganesha, a GlusterFS volume should be created.
- - Disable kernel-nfs, gluster-nfs services on the system using the following commands
- - service nfs stop
- - gluster vol set <volname> nfs.disable ON (Note: this command has to be repeated for all the volumes in the trusted-pool)
- - Usually the libgfapi.so* files are installed in "/usr/lib" or "/usr/local/lib", based on whether you have installed glusterfs using rpm or sources. Verify whether those libgfapi.so* files are linked in "/usr/lib64" and "/usr/local/lib64" as well. If not, create the links for those .so files in those directories.
-
-### 2.) Installing nfs-ganesha
-
-##### i) using rpm install
-
- - nfs-ganesha rpms are available in Fedora19 or later packages. So to install nfs-ganesha, run
- - *#yum install nfs-ganesha*
- - Using CentOS or EL, download the rpms from the below link :
- - http://download.gluster.org/pub/gluster/glusterfs/nfs-ganesha
-
-##### ii) using sources
-
- - cd /root
- - git clone git://github.com/nfs-ganesha/nfs-ganesha.git
- - cd nfs-ganesha/
- - git submodule update --init
- - git checkout -b next origin/next (Note : origin/next is the current development branch)
- - rm -rf ~/build; mkdir ~/build ; cd ~/build
- - cmake -DUSE_FSAL_GLUSTER=ON -DCURSES_LIBRARY=/usr/lib64 -DCURSES_INCLUDE_PATH=/usr/include/ncurses -DCMAKE_BUILD_TYPE=Maintainer /root/nfs-ganesha/src/
- - make; make install
-> Note: libcap-devel, libnfsidmap, dbus-devel, libacl-devel ncurses* packages
-> may need to be installed prior to running this command. For Fedora, libjemalloc,
-> libjemalloc-devel may also be required.
-
-### 3.) Run nfs-ganesha server
-
- - To start nfs-ganesha manually, execute the following command:
- - *#ganesha.nfsd -f <location_of_nfs-ganesha.conf_file> -L <location_of_log_file> -N <log_level> -d*
-
-```sh
-For example:
-#ganesha.nfsd -f nfs-ganesha.conf -L nfs-ganesha.log -N NIV_DEBUG -d
-where:
-nfs-ganesha.log is the log file for the ganesha.nfsd process.
-nfs-ganesha.conf is the configuration file
-NIV_DEBUG is the log level.
-```
- - To check if nfs-ganesha has started, execute the following command:
- - *#ps aux | grep ganesha*
- - By default '/' will be exported
-
-### 4.) Exporting GlusterFS volume via nfs-ganesha
-
-#####step 1 :
-
-To export any GlusterFS volume or directory inside a volume, create the EXPORT block for each of those entries in a .conf file, for example export.conf. The following parameters are required to export any entry.
-- *#cat export.conf*
-
-```sh
-EXPORT{
- Export_Id = 1 ; # Export ID unique to each export
- Path = "volume_path"; # Path of the volume to be exported. Eg: "/test_volume"
-
- FSAL {
- name = GLUSTER;
- hostname = "10.xx.xx.xx"; # IP of one of the nodes in the trusted pool
- volume = "volume_name"; # Volume name. Eg: "test_volume"
- }
-
- Access_type = RW; # Access permissions
- Squash = No_root_squash; # To enable/disable root squashing
- Disable_ACL = TRUE; # To enable/disable ACL
- Pseudo = "pseudo_path"; # NFSv4 pseudo path for this export. Eg: "/test_volume_pseudo"
- Protocols = "3","4" ; # NFS protocols supported
- Transports = "UDP","TCP" ; # Transport protocols supported
- SecType = "sys"; # Security flavors supported
-}
-```
-
-#####step 2 :
-
-Define/copy the "nfs-ganesha.conf" file to a suitable location. This file is available in "/etc/glusterfs-ganesha" on installation of the nfs-ganesha rpms; or, if using the sources, rename the "/root/nfs-ganesha/src/FSAL/FSAL_GLUSTER/README" file to "nfs-ganesha.conf".
-
-#####step 3 :
-
-Now include the “export.conf” file in nfs-ganesha.conf. This can be done by adding the line below at the end of nfs-ganesha.conf.
- - %include “export.conf”
-
-#####step 4 :
-
- - run ganesha server as mentioned in section 3
- - To check if the volume is exported, run
- - *#showmount -e localhost*
-
-### 5.) Additional Notes
-
-To switch back to gluster-nfs/kernel-nfs, kill the ganesha daemon and start those services using the below commands :
-
- - pkill ganesha
- - service nfs start (for kernel-nfs)
- - gluster v set <volname> nfs.disable off
-
-
-### 6.) References
-
- - Setup and create glusterfs volumes :
-http://www.gluster.org/community/documentation/index.php/QuickStart
-
- - NFS-Ganesha wiki : https://github.com/nfs-ganesha/nfs-ganesha/wiki
-
- - Sample configuration files
- - /root/nfs-ganesha/src/config_samples/gluster.conf
- - https://github.com/nfs-ganesha/nfs-ganesha/blob/master/src/config_samples/gluster.conf
-
- - https://forge.gluster.org/nfs-ganesha-and-glusterfs-integration/pages/Home
-
- - http://blog.gluster.org/2014/09/glusterfs-and-nfs-ganesha-integration/
-
diff --git a/doc/features/heal-info-and-split-brain-resolution.md b/doc/features/heal-info-and-split-brain-resolution.md
deleted file mode 100644
index 7a6691db14e..00000000000
--- a/doc/features/heal-info-and-split-brain-resolution.md
+++ /dev/null
@@ -1,459 +0,0 @@
-The following document explains the usage of volume heal info and split-brain
-resolution commands.
-
-##`gluster volume heal <VOLNAME> info [split-brain]` commands
-###volume heal info
-Usage: `gluster volume heal <VOLNAME> info`
-
-This lists all the files that need healing (either their path or
-GFID is printed).
-###Interpreting the output
-All the files that are listed in the output of this command need healing to be
-done. Apart from this, there are 2 special cases that may be associated with
-an entry -
-a) Is in split-brain
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A file in data/metadata split-brain will
-be listed with " - Is in split-brain" appended after its path/gfid, e.g.
-"/file4" in the output provided below. For a gfid split-brain, however,
-the parent directory of the file is shown to be in split-brain and the file
-itself is shown as needing heal, e.g. "/dir" in the output provided below,
-which is in split-brain because of the gfid split-brain of file "/dir/a".
-b) Is possibly undergoing heal
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A file is reported as possibly undergoing
-heal when it may have been under heal while the heal status was being
-determined, but this cannot be said for sure. This can happen when the
-self-heal daemon and the glfsheal process that gathers heal information
-compete for the same lock. Another possible cause is multiple glfsheal
-processes running simultaneously (e.g., multiple users ran the heal info
-command at the same time) and competing for the same lock.
-
-The following is an example of heal info command's output.
-###Example
-Consider a replica volume "test" with 2 bricks b1 and b2;
-self-heal daemon off, mounted at /mnt.
-
-`gluster volume heal test info`
-~~~
-Brick <hostname:brickpath-b1>
-<gfid:aaca219f-0e25-4576-8689-3bfd93ca70c2> - Is in split-brain
-<gfid:39f301ae-4038-48c2-a889-7dac143e82dd> - Is in split-brain
-<gfid:c3c94de2-232d-4083-b534-5da17fc476ac> - Is in split-brain
-<gfid:6dc78b20-7eb6-49a3-8edb-087b90142246>
-
-Number of entries: 4
-
-Brick <hostname:brickpath-b2>
-/dir/file2
-/dir/file1 - Is in split-brain
-/dir - Is in split-brain
-/dir/file3
-/file4 - Is in split-brain
-/dir/a
-
-
-Number of entries: 6
-~~~
-
-###Analysis of the output
-It can be seen that
-A) from brick b1, 4 entries need healing:
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1) file with gfid:6dc78b20-7eb6-49a3-8edb-087b90142246 needs healing
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2) "aaca219f-0e25-4576-8689-3bfd93ca70c2",
-"39f301ae-4038-48c2-a889-7dac143e82dd" and "c3c94de2-232d-4083-b534-5da17fc476ac"
- are in split-brain
-
-B) from brick b2, 6 entries need healing:
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1) "a", "file2" and "file3" need healing
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2) "file1", "file4" & "/dir" are in split-brain
-
-###volume heal info split-brain
-Usage: `gluster volume heal <VOLNAME> info split-brain`
-
-This command shows all the files that are in split-brain.
-###Example
-`gluster volume heal test info split-brain`
-~~~
-Brick <hostname:brickpath-b1>
-<gfid:aaca219f-0e25-4576-8689-3bfd93ca70c2>
-<gfid:39f301ae-4038-48c2-a889-7dac143e82dd>
-<gfid:c3c94de2-232d-4083-b534-5da17fc476ac>
-Number of entries in split-brain: 3
-
-Brick <hostname:brickpath-b2>
-/dir/file1
-/dir
-/file4
-Number of entries in split-brain: 3
-~~~
-Note that, as with the heal info command, for gfid split-brains (same filename but different gfid)
-the parent directories are listed as being in split-brain.
-
-##Resolution of split-brain using CLI
-Once the files in split-brain are identified, their resolution can be done
-from the command line. Note that entry/gfid split-brain resolution is not supported.
-Split-brain resolution commands let the user resolve split-brain in 3 ways.
-###Select the bigger-file as source
-This command is useful for per-file healing where it is known/decided that the
-file with the bigger size is to be considered as the source.
-1.`gluster volume heal <VOLNAME> split-brain bigger-file <FILE>`
-`<FILE>` can be either the full file name as seen from the root of the volume
-(or) the gfid-string representation of the file, which sometimes gets displayed
-in the heal info command's output.
-Once this command is executed, the replica containing the bigger FILE is
-identified and the heal is completed with it as the source.
-
-###Example :
-Consider the above output of heal info split-brain command.
-
-Before healing the file, notice file size and md5 checksums :
-~~~
-On brick b1:
-# stat b1/dir/file1
- File: ‘b1/dir/file1’
- Size: 17 Blocks: 16 IO Block: 4096 regular file
-Device: fd03h/64771d Inode: 919362 Links: 2
-Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
-Access: 2015-03-06 13:55:40.149897333 +0530
-Modify: 2015-03-06 13:55:37.206880347 +0530
-Change: 2015-03-06 13:55:37.206880347 +0530
- Birth: -
-
-# md5sum b1/dir/file1
-040751929ceabf77c3c0b3b662f341a8 b1/dir/file1
-
-On brick b2:
-# stat b2/dir/file1
- File: ‘b2/dir/file1’
- Size: 13 Blocks: 16 IO Block: 4096 regular file
-Device: fd03h/64771d Inode: 919365 Links: 2
-Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
-Access: 2015-03-06 13:54:22.974451898 +0530
-Modify: 2015-03-06 13:52:22.910758923 +0530
-Change: 2015-03-06 13:52:22.910758923 +0530
- Birth: -
-# md5sum b2/dir/file1
-cb11635a45d45668a403145059c2a0d5 b2/dir/file1
-~~~
-Healing file1 using the above command -
-`gluster volume heal test split-brain bigger-file /dir/file1`
-Healed /dir/file1.
-
-After healing is complete, the md5sum and file size on both bricks should be the same.
-~~~
-On brick b1:
-# stat b1/dir/file1
- File: ‘b1/dir/file1’
- Size: 17 Blocks: 16 IO Block: 4096 regular file
-Device: fd03h/64771d Inode: 919362 Links: 2
-Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
-Access: 2015-03-06 14:17:27.752429505 +0530
-Modify: 2015-03-06 13:55:37.206880347 +0530
-Change: 2015-03-06 14:17:12.880343950 +0530
- Birth: -
-# md5sum b1/dir/file1
-040751929ceabf77c3c0b3b662f341a8 b1/dir/file1
-
-On brick b2:
-# stat b2/dir/file1
- File: ‘b2/dir/file1’
- Size: 17 Blocks: 16 IO Block: 4096 regular file
-Device: fd03h/64771d Inode: 919365 Links: 2
-Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
-Access: 2015-03-06 14:17:23.249403600 +0530
-Modify: 2015-03-06 13:55:37.206880000 +0530
-Change: 2015-03-06 14:17:12.881343955 +0530
- Birth: -
-
-# md5sum b2/dir/file1
-040751929ceabf77c3c0b3b662f341a8 b2/dir/file1
-~~~
-###Select one replica as source for a particular file
-2.`gluster volume heal <VOLNAME> split-brain source-brick <HOSTNAME:BRICKNAME> <FILE>`
-`<HOSTNAME:BRICKNAME>` is selected as the source brick, and the
-FILE present in that brick is taken as the source for healing.
-
-###Example :
-Notice the md5 checksums and file size before and after heal.
-
-Before heal :
-~~~
-On brick b1:
-
- stat b1/file4
- File: ‘b1/file4’
- Size: 4 Blocks: 16 IO Block: 4096 regular file
-Device: fd03h/64771d Inode: 919356 Links: 2
-Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
-Access: 2015-03-06 13:53:19.417085062 +0530
-Modify: 2015-03-06 13:53:19.426085114 +0530
-Change: 2015-03-06 13:53:19.426085114 +0530
- Birth: -
-# md5sum b1/file4
-b6273b589df2dfdbd8fe35b1011e3183 b1/file4
-
-On brick b2:
-
-# stat b2/file4
- File: ‘b2/file4’
- Size: 4 Blocks: 16 IO Block: 4096 regular file
-Device: fd03h/64771d Inode: 919358 Links: 2
-Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
-Access: 2015-03-06 13:52:35.761833096 +0530
-Modify: 2015-03-06 13:52:35.769833142 +0530
-Change: 2015-03-06 13:52:35.769833142 +0530
- Birth: -
-# md5sum b2/file4
-0bee89b07a248e27c83fc3d5951213c1 b2/file4
-~~~
-`gluster volume heal test split-brain source-brick test-host:/test/b1 gfid:c3c94de2-232d-4083-b534-5da17fc476ac`
-Healed gfid:c3c94de2-232d-4083-b534-5da17fc476ac.
-
-After healing :
-~~~
-On brick b1:
-# stat b1/file4
- File: ‘b1/file4’
- Size: 4 Blocks: 16 IO Block: 4096 regular file
-Device: fd03h/64771d Inode: 919356 Links: 2
-Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
-Access: 2015-03-06 14:23:38.944609863 +0530
-Modify: 2015-03-06 13:53:19.426085114 +0530
-Change: 2015-03-06 14:27:15.058927962 +0530
- Birth: -
-# md5sum b1/file4
-b6273b589df2dfdbd8fe35b1011e3183 b1/file4
-
-On brick b2:
-# stat b2/file4
- File: ‘b2/file4’
- Size: 4 Blocks: 16 IO Block: 4096 regular file
-Device: fd03h/64771d Inode: 919358 Links: 2
-Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
-Access: 2015-03-06 14:23:38.944609000 +0530
-Modify: 2015-03-06 13:53:19.426085000 +0530
-Change: 2015-03-06 14:27:15.059927968 +0530
- Birth: -
-# md5sum b2/file4
-b6273b589df2dfdbd8fe35b1011e3183 b2/file4
-~~~
-Note that, as mentioned earlier, entry split-brain and gfid split-brain healing
- are not supported using CLI. However, they can be fixed using the method described
- [here](https://github.com/gluster/glusterfs/blob/master/doc/debugging/split-brain.md).
-###Example:
-Trying to heal /dir would fail as it is in entry split-brain.
-`gluster volume heal test split-brain source-brick test-host:/test/b1 /dir`
-Healing /dir failed:Operation not permitted.
-Volume heal failed.
-
-3.`gluster volume heal <VOLNAME> split-brain source-brick <HOSTNAME:BRICKNAME>`
-Consider a scenario where many files are in split-brain such that one brick of
-the replica pair is the source. As a result of the above command, all split-brained
-files on `<HOSTNAME:BRICKNAME>` are selected as the source and healed to the sink.
-
-###Example:
-Consider a volume having three entries "a, b and c" in split-brain.
-~~~
-`gluster volume heal test split-brain source-brick test-host:/test/b1`
-Healed gfid:944b4764-c253-4f02-b35f-0d0ae2f86c0f.
-Healed gfid:3256d814-961c-4e6e-8df2-3a3143269ced.
-Healed gfid:b23dd8de-af03-4006-a803-96d8bc0df004.
-Number of healed entries: 3
-~~~
-
-## An overview of how the heal info commands work
-When these commands are invoked, a "glfsheal" process is spawned which reads
-the entries from the `/<brick-path>/.glusterfs/indices/xattrop/` directory of all
-the bricks that are up (that it can connect to), one after another. These
-entries are the GFIDs of files that might need healing. Once the GFID entries from a
-brick are obtained, the lookup response for each file on every participating
-brick of the replica pair and its trusted.afr.* extended attributes are used to
-determine whether the file needs healing, is in split-brain, etc., as required
-by each command, and the result is displayed to the user.
-
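-As an illustration of the first step above, the sketch below (not part of
-GlusterFS itself) walks the xattrop index directory of one brick and prints the
-candidate GFID entries that glfsheal would examine. The brick path is only a
-placeholder, and the exact contents of the index directory (including the
-"xattrop-*" base entry skipped here) can vary across GlusterFS versions.
-
-~~~
-/* Illustrative only: list candidate GFID entries from a brick's xattrop index. */
-#include <dirent.h>
-#include <stdio.h>
-#include <string.h>
-
-int main (int argc, char **argv)
-{
-        char path[4096];
-        const char *brick = (argc > 1) ? argv[1] : "/test/b1";  /* placeholder brick path */
-        DIR *dir;
-        struct dirent *entry;
-
-        snprintf (path, sizeof (path), "%s/.glusterfs/indices/xattrop", brick);
-        dir = opendir (path);
-        if (!dir) {
-                perror (path);
-                return 1;
-        }
-        while ((entry = readdir (dir)) != NULL) {
-                /* Skip ".", ".." and the "xattrop-*" base entry; the remaining
-                 * names are GFIDs of files that might need healing. */
-                if (entry->d_name[0] == '.' || strncmp (entry->d_name, "xattrop", 7) == 0)
-                        continue;
-                printf ("candidate gfid: %s\n", entry->d_name);
-        }
-        closedir (dir);
-        return 0;
-}
-~~~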
-
-##Resolution of split-brain from the mount point
-A set of getfattr and setfattr commands has been provided to detect the data and metadata split-brain status of a file and to resolve the split-brain, if any, from the mount point.
-
-Consider a volume "test", having bricks b0, b1, b2 and b3.
-
-~~~
-# gluster volume info test
-
-Volume Name: test
-Type: Distributed-Replicate
-Volume ID: 00161935-de9e-4b80-a643-b36693183b61
-Status: Started
-Number of Bricks: 2 x 2 = 4
-Transport-type: tcp
-Bricks:
-Brick1: test-host:/test/b0
-Brick2: test-host:/test/b1
-Brick3: test-host:/test/b2
-Brick4: test-host:/test/b3
-~~~
-
-Directory structure of the bricks is as follows:
-
-~~~
-# tree -R /test/b?
-/test/b0
-├── dir
-│   └── a
-└── file100
-
-/test/b1
-├── dir
-│   └── a
-└── file100
-
-/test/b2
-├── dir
-├── file1
-├── file2
-└── file99
-
-/test/b3
-├── dir
-├── file1
-├── file2
-└── file99
-~~~
-
-Some files in the volume are in split-brain.
-~~~
-# gluster v heal test info split-brain
-Brick test-host:/test/b0/
-/file100
-/dir
-Number of entries in split-brain: 2
-
-Brick test-host:/test/b1/
-/file100
-/dir
-Number of entries in split-brain: 2
-
-Brick test-host:/test/b2/
-/file99
-<gfid:5399a8d1-aee9-4653-bb7f-606df02b3696>
-Number of entries in split-brain: 2
-
-Brick test-host:/test/b3/
-<gfid:05c4b283-af58-48ed-999e-4d706c7b97d5>
-<gfid:5399a8d1-aee9-4653-bb7f-606df02b3696>
-Number of entries in split-brain: 2
-~~~
-###To know data/metadata split-brain status of a file:
-~~~
-getfattr -n replica.split-brain-status <path-to-file>
-~~~
-The above command, executed from the mount, tells whether a file is in data/metadata split-brain. It also provides the list of afr children to analyze to get more information about the file.
-This command is not applicable to gfid/directory split-brain.
-
-###Example:
-1) "file100" is in metadata split-brain. Executing the above mentioned command for file100 gives :
-~~~
-# getfattr -n replica.split-brain-status file100
-# file: file100
-replica.split-brain-status="data-split-brain:no metadata-split-brain:yes Choices:test-client-0,test-client-1"
-~~~
-
-2) "file1" is in data split-brain.
-~~~
-# getfattr -n replica.split-brain-status file1
-# file: file1
-replica.split-brain-status="data-split-brain:yes metadata-split-brain:no Choices:test-client-2,test-client-3"
-~~~
-
-3) "file99" is in both data and metadata split-brain.
-~~~
-# getfattr -n replica.split-brain-status file99
-# file: file99
-replica.split-brain-status="data-split-brain:yes metadata-split-brain:yes Choices:test-client-2,test-client-3"
-~~~
-
-4) "dir" is in directory split-brain but as mentioned earlier, the above command is not applicable to such split-brain. So it says that the file is not under data or metadata split-brain.
-~~~
-# getfattr -n replica.split-brain-status dir
-# file: dir
-replica.split-brain-status="The file is not under data or metadata split-brain"
-~~~
-
-5) "file2" is not in any kind of split-brain.
-~~~
-# getfattr -n replica.split-brain-status file2
-# file: file2
-replica.split-brain-status="The file is not under data or metadata split-brain"
-~~~
-
-### To analyze the files in data and metadata split-brain
-Performing operations (say cat, getfattr, etc.) from the mount on files in split-brain gives an input/output error. To enable users to analyze such files, a setfattr command is provided.
-
-~~~
-# setfattr -n replica.split-brain-choice -v "choiceX" <path-to-file>
-~~~
-Using this command, a particular brick can be chosen from which to access the file in split-brain.
-
-###Example:
-1) "file1" is in data-split-brain. Trying to read from the file gives input/output error.
-~~~
-# cat file1
-cat: file1: Input/output error
-~~~
-Split-brain choices provided for file1 were test-client-2 and test-client-3.
-
-Setting test-client-2 as split-brain choice for file1 serves reads from b2 for the file.
-~~~
-# setfattr -n replica.split-brain-choice -v test-client-2 file1
-~~~
-Now, read operations on the file can be done.
-~~~
-# cat file1
-xyz
-~~~
-Similarly, to inspect the file from the other choice, set replica.split-brain-choice to test-client-3.
-
-Trying to inspect the file from a wrong choice errors out.
-
-To undo the split-brain-choice that has been set, the above-mentioned setfattr command can be used
-with "none" as the value for the extended attribute.
-
-###Example:
-~~~
-1) setfattr -n replica.split-brain-choice -v none file1
-~~~
-Now performing cat operation on the file will again result in input/output error, as before.
-~~~
-# cat file1
-cat: file1: Input/output error
-~~~
-
-The user can access the file for a timeout period each time replica.split-brain-choice is set. This timeout is configurable by the user and has a default value of 5 minutes.
-### To set split-brain-choice timeout
-A setfattr command from the mount allows the user to set this timeout, specified in minutes.
-~~~
-# setfattr -n replica.split-brain-choice-timeout -v <timeout-in-minutes> <mount_point/file>
-~~~
-This is a global timeout, i.e. applicable to all files as long as the mount exists. So the timeout need not be set each time a file needs to be inspected, but it has to be set again for a new mount. This option also needs to be set again every time there is a client graph switch (_see note #3_).
-
-### Resolving the split-brain
-Once the choice for resolving the split-brain is made, the source brick must be set for the healing to be done.
-This is done using the following command:
-
-~~~
-# setfattr -n replica.split-brain-heal-finalize -v <heal-choice> <path-to-file>
-~~~
-
-###Example
-~~~
-# setfattr -n replica.split-brain-heal-finalize -v test-client-2 file1
-~~~
-The above process can be used to resolve data and/or metadata split-brain on all the files.
-
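-For applications that want to drive the same procedure programmatically, the
-sketch below performs the equivalent getfattr/setfattr steps through the Linux
-xattr system calls. The file path and client name are placeholders; the virtual
-xattr names are the ones described above, and error handling is minimal.
-
-~~~
-/* Illustrative sketch: resolve a data/metadata split-brain from the mount. */
-#include <stdio.h>
-#include <string.h>
-#include <sys/xattr.h>
-
-int main (int argc, char **argv)
-{
-        char status[256] = {0};
-        const char *file = (argc > 1) ? argv[1] : "/mnt/file1";      /* file on the mount */
-        const char *source = (argc > 2) ? argv[2] : "test-client-2"; /* chosen replica */
-
-        /* 1. Check the data/metadata split-brain status of the file. */
-        if (getxattr (file, "replica.split-brain-status", status, sizeof (status) - 1) >= 0)
-                printf ("%s: %s\n", file, status);
-
-        /* 2. Pick one replica to serve reads while inspecting the file. */
-        if (setxattr (file, "replica.split-brain-choice", source, strlen (source), 0) != 0)
-                perror ("split-brain-choice");
-
-        /* ... inspect the file here, then: */
-
-        /* 3. Mark the chosen replica as the source and trigger the heal. */
-        if (setxattr (file, "replica.split-brain-heal-finalize", source, strlen (source), 0) != 0)
-                perror ("split-brain-heal-finalize");
-
-        return 0;
-}
-~~~
-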
-NOTE:
-1) If "fopen-keep-cache" fuse mount option is disabled then inode needs to be invalidated each time before selecting a new replica.split-brain-choice to inspect a file. This can be done by using:
-~~~
-# setfattr -n inode-invalidate -v 0 <path-to-file>
-~~~
-
-2) The above-mentioned process for split-brain resolution from the mount will not work on NFS mounts, as NFS doesn't provide xattr support.
-
-3) A client graph switch occurs when there is a change in the client-side translator graph, typically when new translators are added to the client-side graph or during add-brick/remove-brick operations.
diff --git a/doc/features/libgfapi.md b/doc/features/libgfapi.md
deleted file mode 100644
index dfc8cfe6527..00000000000
--- a/doc/features/libgfapi.md
+++ /dev/null
@@ -1,381 +0,0 @@
-One of the known methods to access glusterfs is via the FUSE module. However, it has a performance overhead because of the number of context switches needed to complete one I/O transaction [1].
-
-
-To overcome this limitation, a new method called ‘libgfapi’ was introduced. libgfapi support is available from the GlusterFS-3.4 release.
-
-libgfapi is a userspace library for accessing data in glusterfs. It performs I/O on gluster volumes directly, without a FUSE mount. It is a filesystem-like API that runs in the application's process context. libgfapi eliminates FUSE and the kernel VFS layer from glusterfs volume access, improving both speed and latency [1].
-
-
-Using libgfapi, various user-space filesystems (like NFS-Ganesha or Samba) or virtualizers (like QEMU) can interact with GlusterFS, which serves as the back-end filesystem. Currently, the projects below integrate with glusterfs using the libgfapi interfaces.
-
-
-* qemu storage layer
-* Samba VFS plugin
-* NFS-Ganesha
-
-All the APIs in libgfapi make use of a `struct glfs` object. This object
-contains the volume name, the associated glusterfs context, the
-subvolumes in the graph, etc., which makes it unique for each volume.
-
-
-For any application to make use of libgfapi, it should typically start
-with the APIs below, in the following order:
-
-* To create a new glfs object :
-
- glfs_t *glfs_new (const char *volname) ;
-
- glfs_new() returns glfs_t object.
-
-
-* On this newly created glfs_t, you need to either set a volfile path
-  (glfs_set_volfile) or a volfile server (glfs_set_volfile_server).
-  In case of failures, the corresponding cleanup routine is
-  "glfs_unset_volfile_server".
-
- int glfs_set_volfile (glfs_t *fs, const char *volfile);
-
- int glfs_set_volfile_server (glfs_t *fs, const char *transport,const char *host, int port) ;
-
- int glfs_unset_volfile_server (glfs_t *fs, const char *transport,const char *host, int port) ;
-
-* Specify logging parameters using glfs_set_logging():
-
- int glfs_set_logging (glfs_t *fs, const char *logfile, int loglevel) ;
-
-* Initialize the glfs_t object using glfs_init():
-
-    int glfs_init (glfs_t *fs) ;
-
-#### FOPs APIs available with libgfapi :
-
-
-
- int glfs_get_volumeid (struct glfs *fs, char *volid, size_t size);
-
- int glfs_setfsuid (uid_t fsuid) ;
-
- int glfs_setfsgid (gid_t fsgid) ;
-
- int glfs_setfsgroups (size_t size, const gid_t *list) ;
-
- glfs_fd_t *glfs_open (glfs_t *fs, const char *path, int flags) ;
-
- glfs_fd_t *glfs_creat (glfs_t *fs, const char *path, int flags,mode_t mode) ;
-
- int glfs_close (glfs_fd_t *fd) ;
-
- glfs_t *glfs_from_glfd (glfs_fd_t *fd) ;
-
- int glfs_set_xlator_option (glfs_t *fs, const char *xlator, const char *key,const char *value) ;
-
- typedef void (*glfs_io_cbk) (glfs_fd_t *fd, ssize_t ret, void *data);
-
- ssize_t glfs_read (glfs_fd_t *fd, void *buf,size_t count, int flags) ;
-
- ssize_t glfs_write (glfs_fd_t *fd, const void *buf,size_t count, int flags) ;
-
- int glfs_read_async (glfs_fd_t *fd, void *buf, size_t count, int flags, glfs_io_cbk fn, void *data) ;
-
- int glfs_write_async (glfs_fd_t *fd, const void *buf, size_t count, int flags, glfs_io_cbk fn, void *data) ;
-
- ssize_t glfs_readv (glfs_fd_t *fd, const struct iovec *iov, int iovcnt,int flags) ;
-
- ssize_t glfs_writev (glfs_fd_t *fd, const struct iovec *iov, int iovcnt,int flags) ;
-
- int glfs_readv_async (glfs_fd_t *fd, const struct iovec *iov, int count, int flags, glfs_io_cbk fn, void *data) ;
-
- int glfs_writev_async (glfs_fd_t *fd, const struct iovec *iov, int count, int flags, glfs_io_cbk fn, void *data) ;
-
- ssize_t glfs_pread (glfs_fd_t *fd, void *buf, size_t count, off_t offset,int flags) ;
-
- ssize_t glfs_pwrite (glfs_fd_t *fd, const void *buf, size_t count, off_t offset, int flags) ;
-
- int glfs_pread_async (glfs_fd_t *fd, void *buf, size_t count, off_t offset,int flags, glfs_io_cbk fn, void *data) ;
-
- int glfs_pwrite_async (glfs_fd_t *fd, const void *buf, int count, off_t offset,int flags, glfs_io_cbk fn, void *data) ;
-
- ssize_t glfs_preadv (glfs_fd_t *fd, const struct iovec *iov, int iovcnt, int count, off_t offset, int flags,glfs_io_cbk fn, void *data) ;
-
- ssize_t glfs_pwritev (glfs_fd_t *fd, const struct iovec *iov, int iovcnt,int count, off_t offset, int flags, glfs_io_cbk fn, void *data) ;
-
- int glfs_preadv_async (glfs_fd_t *fd, const struct iovec *iov, glfs_io_cbk fn, void *data) ;
-
- int glfs_pwritev_async (glfs_fd_t *fd, const struct iovec *iov, glfs_io_cbk fn, void *data) ;
-
- off_t glfs_lseek (glfs_fd_t *fd, off_t offset, int whence) ;
-
- int glfs_truncate (glfs_t *fs, const char *path, off_t length) ;
-
- int glfs_ftruncate (glfs_fd_t *fd, off_t length) ;
-
- int glfs_ftruncate_async (glfs_fd_t *fd, off_t length, glfs_io_cbk fn,void *data) ;
-
- int glfs_lstat (glfs_t *fs, const char *path, struct stat *buf) ;
-
- int glfs_stat (glfs_t *fs, const char *path, struct stat *buf) ;
-
- int glfs_fstat (glfs_fd_t *fd, struct stat *buf) ;
-
- int glfs_fsync (glfs_fd_t *fd) ;
-
- int glfs_fsync_async (glfs_fd_t *fd, glfs_io_cbk fn, void *data) ;
-
- int glfs_fdatasync (glfs_fd_t *fd) ;
-
- int glfs_fdatasync_async (glfs_fd_t *fd, glfs_io_cbk fn, void *data) ;
-
- int glfs_access (glfs_t *fs, const char *path, int mode) ;
-
- int glfs_symlink (glfs_t *fs, const char *oldpath, const char *newpath) ;
-
- int glfs_readlink (glfs_t *fs, const char *path,char *buf, size_t bufsiz) ;
-
- int glfs_mknod (glfs_t *fs, const char *path, mode_t mode, dev_t dev) ;
-
- int glfs_mkdir (glfs_t *fs, const char *path, mode_t mode) ;
-
- int glfs_unlink (glfs_t *fs, const char *path) ;
-
- int glfs_rmdir (glfs_t *fs, const char *path) ;
-
- int glfs_rename (glfs_t *fs, const char *oldpath, const char *newpath) ;
-
- int glfs_link (glfs_t *fs, const char *oldpath, const char *newpath) ;
-
- glfs_fd_t *glfs_opendir (glfs_t *fs, const char *path) ;
-
- int glfs_readdir_r (glfs_fd_t *fd, struct dirent *dirent,struct dirent **result) ;
-
- int glfs_readdirplus_r (glfs_fd_t *fd, struct stat *stat, struct dirent *dirent, struct dirent **result) ;
-
- struct dirent *glfs_readdir (glfs_fd_t *fd) ;
-
- struct dirent *glfs_readdirplus (glfs_fd_t *fd, struct stat *stat) ;
-
- long glfs_telldir (glfs_fd_t *fd) ;
-
- void glfs_seekdir (glfs_fd_t *fd, long offset) ;
-
- int glfs_closedir (glfs_fd_t *fd) ;
-
- int glfs_statvfs (glfs_t *fs, const char *path, struct statvfs *buf) ;
-
- int glfs_chmod (glfs_t *fs, const char *path, mode_t mode) ;
-
- int glfs_fchmod (glfs_fd_t *fd, mode_t mode) ;
-
- int glfs_chown (glfs_t *fs, const char *path, uid_t uid, gid_t gid) ;
-
- int glfs_lchown (glfs_t *fs, const char *path, uid_t uid, gid_t gid) ;
-
- int glfs_fchown (glfs_fd_t *fd, uid_t uid, gid_t gid) ;
-
- int glfs_utimens (glfs_t *fs, const char *path,struct timespec times[2]) ;
-
- int glfs_lutimens (glfs_t *fs, const char *path,struct timespec times[2]) ;
-
- int glfs_futimens (glfs_fd_t *fd, struct timespec times[2]) ;
-
- ssize_t glfs_getxattr (glfs_t *fs, const char *path, const char *name,void *value, size_t size) ;
-
- ssize_t glfs_lgetxattr (glfs_t *fs, const char *path, const char *name,void *value, size_t size) ;
-
- ssize_t glfs_fgetxattr (glfs_fd_t *fd, const char *name,void *value, size_t size) ;
-
- ssize_t glfs_listxattr (glfs_t *fs, const char *path,void *value, size_t size) ;
-
- ssize_t glfs_llistxattr (glfs_t *fs, const char *path, void *value,size_t size) ;
-
- ssize_t glfs_flistxattr (glfs_fd_t *fd, void *value, size_t size) ;
-
- int glfs_setxattr (glfs_t *fs, const char *path, const char *name,const void *value, size_t size, int flags) ;
-
- int glfs_lsetxattr (glfs_t *fs, const char *path, const char *name,const void *value, size_t size, int flags) ;
-
- int glfs_fsetxattr (glfs_fd_t *fd, const char *name,const void *value, size_t size, int flags) ;
-
- int glfs_removexattr (glfs_t *fs, const char *path, const char *name) ;
-
- int glfs_lremovexattr (glfs_t *fs, const char *path, const char *name) ;
-
- int glfs_fremovexattr (glfs_fd_t *fd, const char *name) ;
-
- int glfs_fallocate(glfs_fd_t *fd, int keep_size, off_t offset, size_t len) ;
-
- int glfs_discard(glfs_fd_t *fd, off_t offset, size_t len) ;
-
- int glfs_discard_async (glfs_fd_t *fd, off_t length, size_t lent, glfs_io_cbk fn, void *data) ;
-
- int glfs_zerofill(glfs_fd_t *fd, off_t offset, off_t len) ;
-
- int glfs_zerofill_async (glfs_fd_t *fd, off_t length, off_t len, glfs_io_cbk fn, void *data) ;
-
- char *glfs_getcwd (glfs_t *fs, char *buf, size_t size) ;
-
- int glfs_chdir (glfs_t *fs, const char *path) ;
-
- int glfs_fchdir (glfs_fd_t *fd) ;
-
- char *glfs_realpath (glfs_t *fs, const char *path, char *resolved_path) ;
-
- int glfs_posix_lock (glfs_fd_t *fd, int cmd, struct flock *flock) ;
-
- glfs_fd_t *glfs_dup (glfs_fd_t *fd) ;
-
-
- struct glfs_object *glfs_h_lookupat (struct glfs *fs,struct glfs_object *parent,
- const char *path,
- struct stat *stat) ;
-
- struct glfs_object *glfs_h_creat (struct glfs *fs, struct glfs_object *parent,
- const char *path, int flags, mode_t mode,
- struct stat *sb) ;
-
- struct glfs_object *glfs_h_mkdir (struct glfs *fs, struct glfs_object *parent,
- const char *path, mode_t flags,
- struct stat *sb) ;
-
- struct glfs_object *glfs_h_mknod (struct glfs *fs, struct glfs_object *parent,
- const char *path, mode_t mode, dev_t dev,
- struct stat *sb) ;
-
- struct glfs_object *glfs_h_symlink (struct glfs *fs, struct glfs_object *parent,
- const char *name, const char *data,
- struct stat *stat) ;
-
-
- int glfs_h_unlink (struct glfs *fs, struct glfs_object *parent,
- const char *path) ;
-
- int glfs_h_close (struct glfs_object *object) ;
-
- int glfs_caller_specific_init (void *uid_caller_key, void *gid_caller_key,
- void *future) ;
-
- int glfs_h_truncate (struct glfs *fs, struct glfs_object *object,
- off_t offset) ;
-
- int glfs_h_stat(struct glfs *fs, struct glfs_object *object,
- struct stat *stat) ;
-
- int glfs_h_getattrs (struct glfs *fs, struct glfs_object *object,
- struct stat *stat) ;
-
- int glfs_h_getxattrs (struct glfs *fs, struct glfs_object *object,
- const char *name, void *value,
- size_t size) ;
-
- int glfs_h_setattrs (struct glfs *fs, struct glfs_object *object,
- struct stat *sb, int valid) ;
-
- int glfs_h_setxattrs (struct glfs *fs, struct glfs_object *object,
- const char *name, const void *value,
- size_t size, int flags) ;
-
- int glfs_h_readlink (struct glfs *fs, struct glfs_object *object, char *buf,
- size_t bufsiz) ;
-
- int glfs_h_link (struct glfs *fs, struct glfs_object *linktgt,
- struct glfs_object *parent, const char *name) ;
-
- int glfs_h_rename (struct glfs *fs, struct glfs_object *olddir,
- const char *oldname, struct glfs_object *newdir,
- const char *newname) ;
-
- int glfs_h_removexattrs (struct glfs *fs, struct glfs_object *object,
- const char *name) ;
-
- ssize_t glfs_h_extract_handle (struct glfs_object *object,
- unsigned char *handle, int len) ;
-
- struct glfs_object *glfs_h_create_from_handle (struct glfs *fs,
- unsigned char *handle, int len,
- struct stat *stat) ;
-
-
- struct glfs_fd *glfs_h_opendir (struct glfs *fs,
- struct glfs_object *object) ;
-
- struct glfs_fd *glfs_h_open (struct glfs *fs, struct glfs_object *object,
- int flags) ;
-
-For more details on these APIs, please refer to glfs.h and glfs-handles.h in the source tree (api/src/) of glusterfs.
-
-* In case of failures, or to close the connection and destroy the glfs_t
-object, use glfs_fini.
-
- int glfs_fini (glfs_t *fs) ;
-
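-Putting these calls together, a minimal sketch of a libgfapi client might look
-like the one below. The volume name "testvol", the host "server1" and the log
-path are placeholders, the header path assumes a packaged install, and error
-handling is shortened; compile against the glusterfs-api package (for example
-via `pkg-config --cflags --libs glusterfs-api`).
-
-    /* Minimal libgfapi sketch; names and paths are placeholders. */
-    #include <fcntl.h>
-    #include <stdio.h>
-    #include <string.h>
-    #include <unistd.h>
-    #include <glusterfs/api/glfs.h>
-
-    int main (void)
-    {
-            char buf[64] = {0};
-            const char *msg = "hello gluster\n";
-
-            glfs_t *fs = glfs_new ("testvol");              /* per-volume object */
-            if (!fs)
-                    return 1;
-
-            glfs_set_volfile_server (fs, "tcp", "server1", 24007);
-            glfs_set_logging (fs, "/tmp/gfapi.log", 7);     /* log file and verbosity */
-
-            if (glfs_init (fs) != 0) {                      /* connect and build the graph */
-                    fprintf (stderr, "glfs_init failed\n");
-                    glfs_fini (fs);
-                    return 1;
-            }
-
-            glfs_fd_t *fd = glfs_creat (fs, "/hello.txt", O_CREAT | O_RDWR, 0644);
-            if (fd) {
-                    glfs_write (fd, msg, strlen (msg), 0);  /* write, rewind, read back */
-                    glfs_lseek (fd, 0, SEEK_SET);
-                    glfs_read (fd, buf, sizeof (buf) - 1, 0);
-                    printf ("read back: %s", buf);
-                    glfs_close (fd);
-            }
-
-            glfs_fini (fs);                                 /* tear down the connection */
-            return 0;
-    }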
-
-All the file operations are typically divided into the categories below.
-
-* a) Handle based Operations -
-
-These APIs create/make use of a glfs_object (referred to as a handle) unique
-to each file within a volume.
-The structure glfs_object contains an inode pointer and the gfid.
-
-For example, since the NFS protocol uses file handles to access files, these APIs are
-mainly used by the NFS-Ganesha server.
-
-Eg:
-
- struct glfs_object *glfs_h_lookupat (struct glfs *fs,
- struct glfs_object *parent,
- const char *path,
- struct stat *stat);
-
- struct glfs_object *glfs_h_creat (struct glfs *fs,
- struct glfs_object *parent,
- const char *path,
- int flags, mode_t mode,
- struct stat *sb);
-
- struct glfs_object *glfs_h_mkdir (struct glfs *fs,
- struct glfs_object *parent,
- const char *path, mode_t flags,
- struct stat *sb);
-
-
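-A small illustrative sketch of the handle-based style is given below. It
-assumes `fs` is an already initialized glfs_t (as in the earlier sketch), that
-a NULL parent makes the lookup resolve relative to the volume root, and that a
-file named "hello.txt" exists on the volume; it only shows how handles are
-obtained and released.
-
-    #include <stdio.h>
-    #include <sys/stat.h>
-    #include <glusterfs/api/glfs.h>
-    #include <glusterfs/api/glfs-handles.h>
-
-    /* fs must be an already-initialized glfs_t. */
-    static void show_child_size (glfs_t *fs)
-    {
-            struct stat st;
-
-            /* NULL parent: resolve the path relative to the volume root. */
-            struct glfs_object *root = glfs_h_lookupat (fs, NULL, "/", &st);
-            if (!root)
-                    return;
-
-            struct glfs_object *child = glfs_h_lookupat (fs, root, "hello.txt", &st);
-            if (child) {
-                    printf ("hello.txt size: %lld bytes\n", (long long) st.st_size);
-                    glfs_h_close (child);    /* releases the handle, not an fd */
-            }
-            glfs_h_close (root);
-    }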
-
-* b) File path/descriptor based Operations -
-
-These APIs make use of a file path or file descriptor to determine the file
-they need to operate on.
-
-For example, Samba uses these APIs for file operations.
-
-Examples of the APIs using file path -
-
- int glfs_chdir (glfs_t *fs, const char *path) ;
-
- char *glfs_realpath (glfs_t *fs, const char *path, char *resolved_path) ;
-
-Once the file is opened, the file-descriptor generated is used for
-further operations.
-
-Eg:
-
- int glfs_posix_lock (glfs_fd_t *fd, int cmd, struct flock *flock) ;
- glfs_fd_t *glfs_dup (glfs_fd_t *fd) ;
-
-
-
-#### libgfapi bindings :
-
-libgfapi bindings are available for the languages below:
-
- - Go
- - Java
- - python [2]
- - Ruby
-
-For more details on these bindings, please refer to:
-
- #http://www.gluster.org/community/documentation/index.php/Language_Bindings
-
-References:
-
-[1] http://humblec.com/libgfapi-interface-glusterfs/
-[2] http://www.gluster.org/2014/04/play-with-libgfapi-and-its-python-bindings/
-
diff --git a/doc/features/mount_gluster_volume_using_pnfs.md b/doc/features/mount_gluster_volume_using_pnfs.md
deleted file mode 100644
index 403f0c80e81..00000000000
--- a/doc/features/mount_gluster_volume_using_pnfs.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# How to export gluster volumes using pNFS?
-
-The Parallel Network File System (pNFS) is part of the NFS v4.1 protocol that
-allows compute clients to access storage devices directly and in parallel.
-A pNFS cluster consists of an MDS (Meta-Data Server) and DSes (Data Servers).
-The client sends all read/write requests directly to the DS, and all other
-operations are handled by the MDS. pNFS support is implemented as part of the
-GlusterFS + NFS-Ganesha integration.
-
-### 1.) Pre-requisites
-
- - Create a GlusterFS volume
-
- - Install nfs-ganesha (refer section 5)
-
- - Disable kernel-nfs, gluster-nfs services on the system using the following commands
- - service nfs stop
- - gluster vol set <volname> nfs.disable ON (Note: this command has to be repeated for all the volumes in the trusted-pool)
-
-### 2.) Configure nfs-ganesha for pNFS
-
- - Disable nfs-ganesha and tear down the HA cluster via the gluster CLI (pNFS does not require the HA setup)
- - gluster features.ganesha disable
-
- - For the optimal working of pNFS, ganesha servers should be run manually on every node in the trusted pool (refer to section 5)
- - *#ganesha.nfsd -f <location_of_nfs-ganesha.conf_file> -L <location_of_log_file> -N <log_level> -d*
-
- - Check whether volume is exported via nfs-ganesha in all the nodes.
- - *#showmount -e localhost*
-
-### 3.) Mount volume via pNFS
-
-Mount the volume using any nfs-ganesha server in the trusted pool. By default, NFS version 4.1 will use the pNFS protocol for gluster volumes.
- - *#mount -t nfs4 -o minorversion=1 <ip of server>:/<volume name> <mount path>*
-
-### 4.) Points to be noted
-
- - The current architecture supports only a single MDS and multiple DSes. The server which the client mounts from acts as the MDS, and all servers, including the MDS, can act as DSes.
-
- - If any of the DSes goes down, the MDS will handle those I/Os.
-
- - Thereafter, all subsequent NFS clients need to use the same server for mounting that volume via pNFS, i.e. more than one MDS for a volume is not preferred.
-
- - pNFS support has only been tested with distributed, replicated or distributed-replicate volumes
-
- - It has been tested and verified with RHEL 6.5, Fedora 20 and Fedora 21 NFS clients. It is always better to use the latest NFS clients.
-
-### 5.) References
-
- - Setup and create glusterfs volumes : http://www.gluster.org/community/documentation/index.php/QuickStart
-
- - NFS-Ganesha wiki : https://github.com/nfs-ganesha/nfs-ganesha/wiki
-
- - For installing, running NFS-Ganesha and exporting a volume :
- - read doc/features/glusterfs_nfs-ganesha_integration.md
- - http://blog.gluster.org/2014/09/glusterfs-and-nfs-ganesha-integration/
diff --git a/doc/features/nufa.md b/doc/features/nufa.md
deleted file mode 100644
index 03b8194b4c0..00000000000
--- a/doc/features/nufa.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# NUFA Translator
-
-The NUFA ("Non Uniform File Access") is a variant of the DHT ("Distributed Hash
-Table") translator, intended for use with workloads that have a high locality
-of reference. Instead of placing new files pseudo-randomly, it places them on
-the same nodes where they are created so that future accesses can be made
-locally. For replicated volumes, this means that one copy will be local and
-others will be remote; the read-replica selection mechanisms will then favor
-the local copy for reads. For non-replicated volumes, the only copy will be
-local.
-
-## Interface
-
-Use of NUFA is controlled by a volume option, as follows.
-
- gluster volume set myvolume cluster.nufa on
-
-This will cause the NUFA translator to be used wherever the DHT translator
-otherwise would be. The rest is all automatic.
-
diff --git a/doc/features/ovirt-integration.md b/doc/features/ovirt-integration.md
deleted file mode 100644
index 46dbeabbbaa..00000000000
--- a/doc/features/ovirt-integration.md
+++ /dev/null
@@ -1,106 +0,0 @@
-##Ovirt Integration with glusterfs
-
-oVirt is an open-source virtualization management platform. You can use oVirt to manage
-hardware nodes, storage and network resources, and to deploy and monitor virtual machines
-running in your data center. oVirt serves as the bedrock for Red Hat's Enterprise Virtualization product,
-and is the "upstream" project where new features are developed in advance of their inclusion
-in that supported product offering.
-
-To know more about oVirt, please visit http://www.ovirt.org/. For the installation
-and configuration steps, please refer to
-#http://www.ovirt.org/Quick_Start_Guide#Install_oVirt_Engine_.28Fedora.29%60
-
-When oVirt is integrated with gluster, glusterfs can be used in the forms below:
-
-* As a storage domain to host VM disks.
-
-There are mainly two ways to exploit glusterfs as a storage domain.
- - POSIXFS_DOMAIN ( >=oVirt 3.1 )
- - GLUSTERFS_DOMAIN ( >=oVirt 3.3)
-
-The former has a performance overhead and is not an ideal way to consume images hosted on glusterfs volumes.
-With this method, qemu uses the glusterfs `mount point` to access VM images, which incurs FUSE overhead.
-libvirt treats this as a file-type disk in its XML schema.
-
-The latter is the recommended way of using glusterfs with oVirt as a storage domain. It provides a better,
-more efficient way to access images hosted on glusterfs volumes. When qemu accesses a glusterfs volume using this method,
-it makes use of the `libgfapi` implementation of glusterfs; this method is called native integration.
-Here glusterfs is added as a block backend to qemu, and libvirt treats this as a `network`-type disk.
-
-For more details on this, please refer to # http://www.ovirt.org/Features/GlusterFS_Storage_Domain
-However, there are 2 bugs which block the usage of this feature:
-
-https://bugzilla.redhat.com/show_bug.cgi?id=1022961
-https://bugzilla.redhat.com/show_bug.cgi?id=1017289
-
-Please check above bugs for latest status.
-
-* To manage gluster trusted pools.
-
-oVirt web admin console can be used to -
- - add new / import existing gluster cluster
- - add/delete volumes
- - add/delete bricks
- - set/reset volume options
- - optimize volume for virt store
- - Rebalance and Remove bricks
- - Monitor the gluster deployment - node, brick and volume status, and
-   enhanced service monitoring (physical node resources as well as quota, geo-rep and self-heal status) through Nagios integration (>=oVirt 3.4)
-
-
-
-When configuring oVirt to manage only a gluster cluster/trusted pool, you need to select `gluster` as the input for
-`Application mode` in the OVIRT ENGINE CONFIGURATION option of the `engine-setup` command.
-Refer to # http://www.ovirt.org/Quick_Start_Guide#Install_oVirt_Engine_.28Fedora.29%60
-
-If you want to use gluster for both (as a storage domain to host VM disks and to manage gluster trusted pools),
-you need to input `both` as the value for `Application mode` in the engine-setup command.
-
-Once you have successfully installed oVirt Engine as mentioned above, you will be provided with instructions
-to access oVirt's web console.
-
-The example below shows how to configure gluster nodes on Fedora.
-
-
-##Configuring gluster nodes
-
-On the machine designated as your host, install any supported distribution (e.g. Fedora/CentOS/RHEL).
-A minimal installation is sufficient.
-
-Refer # http://www.ovirt.org/Quick_Start_Guide#Install_Hosts
-
-
-##Connect to oVirt Engine
-
-Log In to Administration Console
-
-Ensure that you have the administrator password configured during installation of oVirt engine.
-
-- To connect to oVirt webadmin console
-
-
-Open a browser and navigate to https://domain.example.com/webadmin. Substitute domain.example.com with the URL provided during installation
-
-If this is your first time connecting to the administration console, oVirt Engine will issue
-security certificates for your browser. Click the link labelled "this certificate" to trust the
-ca.cer certificate. A pop-up displays; click Open to launch the Certificate dialog.
-Click `Install Certificate` and choose to place the certificate in the Trusted Root Certification Authorities store.
-
-
-The console login screen displays. Enter admin as your User Name, and enter the Password that
-you provided during installation. Ensure that your domain is set to Internal. Click Login.
-
-
-You have now successfully logged in to the oVirt web administration console. Here, you can configure and manage all your gluster resources.
-
-To manage gluster trusted pool:
-
-- Create a cluster with "Enable gluster service" turned on. (Turn on "Enable virt service" if the same nodes are used as hypervisors as well.)
-- Add hosts which have already been set up as in the step "Configuring gluster nodes".
-- Create a volume and click on "Optimize for virt store". This sets the volume tunables that optimize the volume to be used as an image store.
-
-To use this volume as a storage domain:
-
-Please refer to the `User interface` section of www.ovirt.org/Features/GlusterFS_Storage_Domain
diff --git a/doc/features/qemu-integration.md b/doc/features/qemu-integration.md
deleted file mode 100644
index b44dc06bb43..00000000000
--- a/doc/features/qemu-integration.md
+++ /dev/null
@@ -1,231 +0,0 @@
-Using GlusterFS volumes to host VM images and data was sub-optimal due to the FUSE overhead involved in accessing gluster volumes via the GlusterFS native client. However, this has changed with two specific enhancements:
-
-- A new library called libgfapi is now available as part of GlusterFS that provides POSIX-like C APIs for accessing gluster volumes. libgfapi support is available from GlusterFS-3.4 release.
-- QEMU (starting from QEMU-1.3) has a GlusterFS block driver that uses libgfapi, so there is no longer any FUSE overhead when QEMU works with VM images on gluster volumes.
-
-GlusterFS, with its pluggable translator model, can serve as a flexible storage backend for QEMU. QEMU has to just talk to GlusterFS, and GlusterFS will hide the different file systems and storage types underneath. Various GlusterFS storage features like replication and striping are automatically available to QEMU. Efforts are also underway to add a block device backend in Gluster via the Block Device (BD) translator, which will expose underlying block devices as files to QEMU. This allows GlusterFS to be a single storage backend for both file- and block-based storage types.
-
-###GlusterFS specification in QEMU
-
-A VM image residing on a gluster volume can be specified on the QEMU command line using the following URI format:
-
- gluster[+transport]://[server[:port]]/volname/image[?socket=...]
-
-
-
-* `gluster` is the protocol.
-
-* `transport` specifies the transport type used to connect to gluster management daemon (glusterd). Valid transport types are `tcp, unix and rdma.` If a transport type isn’t specified, then tcp type is assumed.
-
-* `server` specifies the server where the volume file specification for the given volume resides. This can be either hostname, ipv4 address or ipv6 address. ipv6 address needs to be within square brackets [ ]. If transport type is unix, then server field should not be specified. Instead the socket field needs to be populated with the path to unix domain socket.
-
-* `port` is the port number on which glusterd is listening. This is optional and if not specified, QEMU will send 0 which will make gluster to use the default port. If the transport type is unix, then port should not be specified.
-
-* `volname` is the name of the gluster volume which contains the VM image.
-
-* `image` is the path to the actual VM image that resides on gluster volume.
-
-
-###Examples:
-
- gluster://1.2.3.4/testvol/a.img
- gluster+tcp://1.2.3.4/testvol/a.img
- gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
- gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
- gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
- gluster+tcp://server.domain.com:24007/testvol/dir/a.img
- gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
- gluster+rdma://1.2.3.4:24007/testvol/a.img
-
-
-
-NOTE: (GlusterFS URI description and above examples are taken from QEMU documentation)
-
-###Configuring QEMU with GlusterFS backend
-
-While building QEMU from source, in addition to the normal configuration options, ensure that the --enable-glusterfs option is specified explicitly with the ./configure script to get glusterfs support in qemu.
-
-Starting with QEMU-1.6, pkg-config is used to configure the GlusterFS backend in QEMU. If you are using GlusterFS compiled and installed from sources, then the GlusterFS package config file (glusterfs-api.pc) might not be present at the standard path and you will have to explicitly add the path by executing this command before running the QEMU configure script:
-
- export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig/
-
-Without this, GlusterFS driver will not be compiled into QEMU even when GlusterFS is present in the system.
-
-* Creating a VM image on GlusterFS backend
-
-The qemu-img command can be used to create VM images on the gluster backend. The general syntax for image creation looks like this:
-
- qemu-img create gluster://server/volname/path/to/image size
-
-## How to setup the environment:
-
-This use case (using a glusterfs backend as the VM disk store) is known as the 'Virt-Store' use case. The steps for the entire procedure can be split into:
-
-* Steps to be done on gluster volume side
-* Steps to be done on Hypervisor side
-
-
-##Steps to be done on gluster side
-
-These are the steps that need to be done on the gluster side. Precisely, this involves:
-
- Creating "Trusted Storage Pool"
- Creating a volume
- Tuning the volume for virt-store
- Tuning glusterd to accept requests from QEMU
- Tuning glusterfsd to accept requests from QEMU
- Setting ownership on the volume
- Starting the volume
-
-* Creating "Trusted Storage Pool"
-
-Install the glusterfs rpms on the node. You can create a volume with a single node. You can also scale up the cluster, which we call a `Trusted Storage Pool`, by adding more nodes to the cluster:
-
- gluster peer probe <hostname>
-
-* Creating a volume
-
-It is highly recommended to have a replicate or distribute-replicate volume for the virt-store use case, as it adds high availability and fault tolerance. Remember that plain distribute works equally well.
-
-    gluster volume create <volname> replica 2 <brick1> .. <brickN>
-
-where `<brick1>` is `<hostname>:/<path-of-dir>`
-
-
-Note: It is recommended to create sub-directories inside the brick mount point and use those while creating a volume. For example, if /home/brick1 is the mount point of an XFS filesystem, you can create a sub-directory /home/brick1/b1 inside it and use that while creating the volume. You can also use space available in the root filesystem for bricks; the gluster CLI, by default, throws a warning in that case, which you can override by using the force option:
-
-    gluster volume create <volname> replica 2 <brick1> .. <brickN> force
-
-If you are new to GlusterFS, you can take a look at the QuickStart guide (http://www.gluster.org/community/documentation/index.php/QuickStart).
-
-* Tuning the volume for virt-store
-
-There are recommended settings available for virt-store. These provide good performance characteristics when enabled on the volume used for virt-store.
-
-Refer to http://www.gluster.org/community/documentation/index.php/Virt-store-usecase#Tunables for recommended tunables and for applying them on the volume, http://www.gluster.org/community/documentation/index.php/Virt-store-usecase#Applying_the_Tunables_on_the_volume
-
-
-* Tuning glusterd to accept requests from QEMU
-
-glusterd accepts requests only from applications that run with a port number less than 1024 and blocks them otherwise. QEMU uses port numbers greater than 1024; to make glusterd accept requests from QEMU, edit the glusterd vol file, /etc/glusterfs/glusterd.vol, and add the following:
-
- option rpc-auth-allow-insecure on
-
-Note: If you have installed glusterfs from source, you can find glusterd vol file at `/usr/local/etc/glusterfs/glusterd.vol`
-
-Restart glusterd after adding that option to glusterd vol file
-
- service glusterd restart
-
-* Tuning glusterfsd to accept requests from QEMU
-
-Enable the option `allow-insecure` on the particular volume
-
- gluster volume set <volname> server.allow-insecure on
-
-IMPORTANT: As of now (April 2, 2014) there is a bug where allow-insecure is not dynamically set on a volume. You need to restart the volume for the change to take effect.
-
-
-* Setting ownership on the volume
-
-Set qemu:qemu ownership on the volume:
-
- gluster volume set <vol-name> storage.owner-uid 107
- gluster volume set <vol-name> storage.owner-gid 107
-
-* Starting the volume
-
-Start the volume
-
- gluster volume start <vol-name>
-
-## Steps to be done on Hypervisor Side:
-
-To create a raw image,
-
- qemu-img create gluster://1.2.3.4/testvol/dir/a.img 5G
-
-To create a qcow2 image,
-
- qemu-img create -f qcow2 gluster://server.domain.com:24007/testvol/a.img 5G
-
-
-
-
-
-## Booting VM image from GlusterFS backend
-
-A VM image 'a.img' residing on gluster volume testvol can be booted using QEMU like this:
-
-
- qemu-system-x86_64 -drive file=gluster://1.2.3.4/testvol/a.img,if=virtio
-
-In addition to VM images, gluster drives can also be used as data drives:
-
- qemu-system-x86_64 -drive file=gluster://1.2.3.4/testvol/a.img,if=virtio -drive file=gluster://1.2.3.4/datavol/a-data.img,if=virtio
-
-Here 'a-data.img' from the datavol gluster volume appears as a second drive for the guest.
-
-It is also possible to make use of libvirt to define a disk and use it with qemu:
-
-
-### Create libvirt XML to define Virtual Machine
-
-virt-install is a python wrapper that is mostly used to create a VM using a set of parameters. However, virt-install doesn't support any network filesystem [ https://bugzilla.redhat.com/show_bug.cgi?id=1017308 ].
-
-Create a libvirt VM XML (see http://libvirt.org/formatdomain.html) in which the disk section is formatted so that the qemu driver for glusterfs is used. This can be seen in the following example XML description:
-
-
- <disk type='network' device='disk'>
- <driver name='qemu' type='raw' cache='none'/>
- <source protocol='gluster' name='distrepvol/vm3.img'>
- <host name='10.70.37.106' port='24007'/>
- </source>
- <target dev='vda' bus='virtio'/>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
- </disk>
-
-
-
-
-
-* Define the VM from the XML file that was created earlier
-
-
- virsh define <xml-file-description>
-
-* Verify that the VM is created successfully
-
-
- virsh list --all
-
-* Start the VM
-
-
- virsh start <VM>
-
-* Verification
-
-You can verify the disk image file that is being used by the VM:
-
- virsh domblklist <VM-Domain-Name/ID>
-
-The above should show the volume name and the image name. Here is an example:
-
-
- [root@test ~]# virsh domblklist vm-test2
- Target Source
- ------------------------------------------------
- vda distrepvol/test.img
- hdc -
-
-
-Reference:
-
-For more details on this feature's implementation and its advantages, please refer to:
-
-http://raobharata.wordpress.com/2012/10/29/qemu-glusterfs-native-integration/
-
-http://www.gluster.org/community/documentation/index.php/Libgfapi_with_qemu_libvirt
diff --git a/doc/features/quota/quota-object-count.md b/doc/features/quota/quota-object-count.md
deleted file mode 100644
index 063aa7c5d61..00000000000
--- a/doc/features/quota/quota-object-count.md
+++ /dev/null
@@ -1,47 +0,0 @@
-Previous mechanism:
-====================
-
-The only way we could have retrieved the number of files/objects in a directory or volume was to do a crawl of the entire directory/volume. That was expensive and was not scalable.
-
-New Design Implementation:
-==========================
-The proposed mechanism will provide an easier alternative to determine the count of files/objects in a directory or volume.
-
-The new mechanism will store count of objects/files as part of an extended attribute of a directory. Each directory extended attribute value will indicate the number of files/objects present in a tree with the directory being considered as the root of the tree.
-
-Inode quota management
-======================
-
-**setting limits**
-
-Syntax:
-*gluster volume quota <volname\> limit-objects <path\> <number\>*
-
-Details:
-<number\> is the hard limit on the number of objects for path <path\>. If the hard limit is exceeded, creation of files or directories is no longer permitted.
-
-**list-objects**
-
-Syntax:
-*gluster volume quota <volname\> list-objects \[path\] ...*
-
-Details:
-If a path is not specified, all the directories which have an object limit set are displayed. If a path is provided, only that particular path is displayed, along with the details associated with it.
-
-Sample output:
-
- Path Hard-limit Soft-limit Files Dirs Available Soft-limit exceeded? Hard-limit exceeded?
- ---------------------------------------------------------------------------------------------------------------------------------------------
- /dir 10 80% 0 1 9 No No
-
-**Deleting limits**
-
-Syntax:
-*gluster volume quota <volname\> remove-objects <path\>*
-
-Details:
-This will remove the object limit set on the specified path.
-
-Note: There is a known issue associated with remove-objects. When both a usage limit and an object limit are set on a path, removing either limit will remove the other as well. This is tracked in bug #1202244.
-
-
diff --git a/doc/features/quota/quota-scalability.md b/doc/features/quota/quota-scalability.md
deleted file mode 100644
index e47c898dd2a..00000000000
--- a/doc/features/quota/quota-scalability.md
+++ /dev/null
@@ -1,52 +0,0 @@
-Issues with the older implementation:
------------------------------------
-* >#### Enforcement of quota was done on the client side. This had the following two issues :
- > >* All clients are not trusted and hence enforcement is not secure.
- > >* The quota enforcer caches the directory size for a certain timeout period to reduce network calls to fetch the size. On timeout, this cache is validated by querying the server. With more clients, the traffic caused by this
-validation increases.
-
-* >#### Relying on lookup calls on a file/directory (inode) to update its contribution [time consuming]
-
-* >####Hard limits were stored in a comma-separated list.
- > >* Hence, changing the hard limit of one directory was not an independent operation and would invalidate the hard limits of other directories. The string had to be parsed once for each of these directories just to identify whether its hard limit had changed. This limits the number of hard limits that can be configured.
-
-* >####The CLI used to fetch the list of directories on which a quota limit is set from glusterd.
- > >* With a larger number of limits, the network overhead incurred to fetch this list limits the number of directories on which quota can be set.
-
-* >#### Problem with NFS mount
- > >* Quota, for its enforcement and accounting, requires all the ancestors of a file/directory up to the root. However, with NFS relying heavily on nameless lookups (through which there is no guarantee that the ancestry can be
-accessed), this ancestry is not always present. Hence accounting and enforcement were not correct.
-
-
-New Design Implementation:
---------------------------------
-
-* Quota enforcement is moved to server side. This addresses issues that arose because of client side enforcement.
-
-* Two levels of quota limits, soft and hard, are introduced.
-  A message is logged on reaching the soft quota, and writes fail with EDQUOT after the hard limit is reached.
-
-Work Flow
------------------
-
-* Accounting
-  - This is done using the marker translator loaded on each brick of the volume. Accounting happens in the background, i.e. it doesn't happen in-flight with the file operation, so file operation latency is not
-directly affected by the time taken to perform accounting. The update is sent recursively upwards, up to the root of the volume.
-
-* Enforcement
-  - The enforcer updates its cached 'view' of a directory's disk usage on the incidence of a file operation, after the expiry of the hard/soft timeout, depending on the current usage. The enforcer uses quotad to get the
-aggregated disk usage of a directory from the accounting information present on each brick (i.e., provided by the marker).
-
-* Aggregator (quotad)
-  - Quotad is a daemon that serves the volume-wide disk usage of a directory on which quota is configured. It is present on all nodes in the cluster (trusted storage pool), since bricks don't have a global view of the cluster.
-Quotad queries the disk usage information from all the bricks in that volume and aggregates it. It manages all the volumes on which quota is enabled.
-
-
-Benefit to GlusterFS
----------------------------------
-
-* Supports up to 65536 quota configurations per volume.
-* More quotas can be configured in a single volume, enabling GlusterFS for use cases like home directories.
-
-###For more information on quota usability, refer to the following link:
-> https://access.redhat.com/site/documentation/en-US/Red_Hat_Storage/2.1/html-single/Administration_Guide/index.html#chap-User_Guide-Dir_Quota-Enable
diff --git a/doc/features/rdmacm.md b/doc/features/rdmacm.md
deleted file mode 100644
index 2c287e85fb6..00000000000
--- a/doc/features/rdmacm.md
+++ /dev/null
@@ -1,26 +0,0 @@
-## Rdma Connection manager ##
-
-### What? ###
-Infiniband requires addresses of end points to be exchanged using an out-of-band channel (like tcp/ip). Glusterfs used a custom protocol over a tcp/ip channel to exchange this address. However, librdmacm provides the same functionality with the advantage of being a standard protocol. This helps if we want to communicate with a non-glusterfs entity (say nfs client with gluster nfs server) over infiniband.
-
-### Dependencies ###
-* [IP over Infiniband](http://pkg-ofed.alioth.debian.org/howto/infiniband-howto-5.html) - The value to *option* **remote-host** in glusterfs transport configuration should be an IPoIB address
-* [rdma cm kernel module](http://pkg-ofed.alioth.debian.org/howto/infiniband-howto-4.html#ss4.4)
-* [user space rdmacm library - librdmacm](https://www.openfabrics.org/downloads/rdmacm)
-
-### rdma-cm in >= GlusterFs 3.4 ###
-
-Following is the impact of http://review.gluster.org/#change,149.
-
-New userspace packages needed:
-librdmacm
-librdmacm-devel
-
-### Limitations ###
-
-* Because of bug [890502](https://bugzilla.redhat.com/show_bug.cgi?id=890502), we have to probe the peer on an IPoIB address. This imposes a restriction that all volumes created in the future have to communicate over an IPoIB address (irrespective of whether they use gluster's tcp or rdma transport).
-
-* Currently the client is free to choose between tcp and rdma transports while communicating with the server (by creating volumes with **transport-type tcp,rdma**, as sketched below). This independence was a by-product of our ability to use the tcp/ip channel - transports with *option transport-type tcp* - for the rdma connection establishment handshake too. However, with the new requirement of an IPoIB address for connection establishment, we lose this independence (until we bring in [multi-network support](https://bugzilla.redhat.com/show_bug.cgi?id=765437) - where a brick can be identified by a set of ip-addresses and we can choose different pairs of ip-addresses for communication based on our requirements - in glusterd).
-
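-A hedged sketch of creating and mounting such a volume over tcp and rdma (host and brick names are hypothetical):
-
-    gluster volume create rdmavol transport tcp,rdma server1:/bricks/b1 server2:/bricks/b1
-    gluster volume start rdmavol
-    mount -t glusterfs -o transport=rdma server1:/rdmavol /mnt/rdmavol
-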
-### External links ###
-* [Infiniband Howto](http://pkg-ofed.alioth.debian.org/howto/infiniband-howto.html)
diff --git a/doc/features/readdir-ahead.md b/doc/features/readdir-ahead.md
deleted file mode 100644
index 5302a021202..00000000000
--- a/doc/features/readdir-ahead.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Readdir-ahead ##
-
-### Summary ###
-Provide read-ahead support for directories to improve sequential directory read performance.
-
-### Owners ###
-Brian Foster
-
-### Detailed Description ###
-The read-ahead feature for directories is analogous to read-ahead for files. The objective is to detect sequential directory read operations and establish a pipeline for directory content. When a readdir request is received and fulfilled, preemptively issue subsequent readdir requests to the server in anticipation of those requests from the user. If sequential readdir requests are received, the directory content is already immediately available in the client. If subsequent requests are not sequential or not received, said data is simply dropped and the optimization is bypassed.
-
-readdir-ahead is currently disabled by default. It can be enabled with the following command:
-
- gluster volume set <volname> readdir-ahead on
diff --git a/doc/features/rebalance.md b/doc/features/rebalance.md
deleted file mode 100644
index e7212d4011f..00000000000
--- a/doc/features/rebalance.md
+++ /dev/null
@@ -1,74 +0,0 @@
-## Background
-
-
-For a more detailed description, view Jeff Darcy's blog post [here](http://pl.atyp.us/hekafs.org/index.php/2012/03/glusterfs-algorithms-distribution/).
-
-GlusterFS uses the distribute translator (DHT) to aggregate space of multiple servers. DHT distributes files among its subvolumes using a consistent hashing method providing 32-bit hashes. Each DHT subvolume is given a range in the 32-bit hash space. A hash value is calculated for every file based on its name. The file is then placed in the subvolume with the hash range that contains the hash value.
-
-## What is rebalance?
-
-The rebalance process migrates files between the DHT subvolumes when necessary.
-
-## When is rebalance required?
-
-Rebalancing is required for two main cases.
-
-1. Addition/Removal of bricks
-
-2. Renaming of a file
-
-## Addition/Removal of bricks
-
-Whenever the number or order of DHT subvolumes changes, the hash range given to each subvolume is recalculated. When this happens, already existing files on the volume will need to be moved to the correct subvolume based on their hash. Rebalance performs this activity.
-
-Addition of bricks which increase the size of a volume will increase the number of DHT subvolumes and lead to recalculation of hash ranges (This doesn't happen when bricks are added to a volume to increase redundancy, i.e. increase replica count of a volume). This will require an explicit rebalance command to be issued to migrate the files.
-
-Removal of bricks which decreases the size of a volume also causes the hash ranges of DHT to be recalculated. But we don't need to issue an explicit rebalance command in this case, as rebalance is done automatically by the remove-brick process if needed.
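-
-As a hedged illustration (volume and brick names are hypothetical), expanding a volume and then rebalancing, or shrinking it with remove-brick, typically looks like:
-
-    # expand the volume, then migrate existing files onto the new brick
-    gluster volume add-brick myvol server3:/bricks/b1
-    gluster volume rebalance myvol start
-
-    # shrink the volume; remove-brick migrates data itself before commit
-    gluster volume remove-brick myvol server3:/bricks/b1 start
-    gluster volume remove-brick myvol server3:/bricks/b1 status
-    gluster volume remove-brick myvol server3:/bricks/b1 commit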
-
-## Renaming of a file
-
-Renaming a file will cause its hash to change. The file then needs to be moved to the correct subvolume based on its new hash. Rebalance does this.
-
-## How does rebalance work?
-
-At a high level, the rebalance process consists of the following 3 steps:
-
-1. Crawl the volume to access all files
-2. Calculate the hash for each file
-3. If needed, migrate the file to the correct subvolume.
-
-
-The rebalance process has been optimized by making it distributed across the trusted storage pool. With distributed rebalance, a rebalance process is launched on each peer in the cluster. Each rebalance process will crawl files on only those bricks of the volume which are present on it, and migrate the files which need migration to the correct brick. This speeds up the rebalance process considerably.
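-
-A hedged sketch of driving and monitoring this distributed process from the CLI (the volume name is hypothetical); the status sub-command reports the progress of the rebalance process running on each node:
-
-    gluster volume rebalance myvol start
-    gluster volume rebalance myvol status
-    gluster volume rebalance myvol stop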
-
-## What will happen if rebalance is not run?
-
-### Addition of bricks
-
-With the current implementation of add-brick, when the size of a volume is augmented by adding new bricks, the new bricks are not put into use immediately, i.e., the hash ranges are not recalculated immediately. This means that the files will still be placed only onto the existing bricks, leaving the newly added storage space unused. Starting a rebalance process on the volume will cause the hash ranges to be recalculated with the new bricks included, which allows the newly added storage space to be used.
-
-### Renaming a file
-
-When a file rename causes the file to be hashed to a new subvolume, DHT writes a link file on the new subvolume leaving the actual file on the original subvolume. A link file is an empty file, which has an extended attribute set that points to the subvolume on which the actual file exists. So, when a client accesses the renamed file, DHT first looks for the file in the hashed subvolume and gets the link file. DHT understands the link file, and gets the actual file from the subvolume pointed to by the link file. This leads to a slight reduction in performance. A rebalance will move the actual file to the hashed subvolume, allowing clients to access the file directly once again.
-
-## Are clients affected during a rebalance process?
-
-The rebalance process is transparent to applications on the clients. Applications which have open files on the volume will not be affected by the rebalance process, even if the open file requires migration. The DHT translator on the client will hide the migration from the applications.
-
-## How are open files migrated?
-
-(A more technical description of the algorithm used can be seen in the commit message of commit a07bb18c8adeb8597f62095c5d1361c5bad01f09.)
-
-To achieve migration of open files, two things need to be ensured:
-a) any writes or changes happening to the file during migration are correctly synced to the destination subvolume after the migration is complete.
-b) any further changes should be made to the destination subvolume.
-
-Both of these requirements require sending notifications to clients. Clients are notified by overloading an attribute used in every callback function. DHT understands these attributes in the callbacks and can tell whether a file is being migrated or not.
-
-During rebalance, a file will be in one of two phases:
-
-1. Migration in process - In this phase the file is being migrated by the rebalance process from the source subvolume to the destination subvolume. The rebalance process will set an 'in-migration' attribute on the file, which will notify the clients' DHT translator. The clients' DHT translator will then take care to send any further changes to the destination subvolume as well. This way we satisfy the first requirement.
-
-2. Migration completed - Once the file has been migrated, the rebalance process will set a 'migration-complete' attribute on the file. The clients will be notified of the completion and all further operations on the file will happen on the destination subvolume.
-
-The DHT translator handles the above and allows the applications on the clients to continue working on a file under migration.
diff --git a/doc/features/server-quorum.md b/doc/features/server-quorum.md
deleted file mode 100644
index 7b20084cea8..00000000000
--- a/doc/features/server-quorum.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Server Quorum
-
-Server quorum is a feature intended to reduce the occurrence of "split brain"
-after a brick failure or network partition. Split brain happens when different
-sets of servers are allowed to process different sets of writes, leaving data
-in a state that can not be reconciled automatically. The key to avoiding split
-brain is to ensure that there can be only one set of servers - a quorum - that
-can continue handling writes. Server quorum does this by the brutal but
-effective means of forcing down all brick daemons on cluster nodes that can no
-longer reach enough of their peers to form a majority. Because there can only
-be one majority, there can be only one set of bricks remaining, and thus split
-brain can not occur.
-
-## Options
-
-Server quorum is controlled by two parameters:
-
- * **cluster.server-quorum-type**
-
- This value may be "server" to indicate that server quorum is enabled, or
- "none" to mean it's disabled.
-
- * **cluster.server-quorum-ratio**
-
- This is the percentage of cluster nodes that must be up to maintain quorum.
- More precisely, this percentage of nodes *plus one* must be up.
-
-Note that these are cluster-wide flags. All volumes served by the cluster will
-be affected. Once these values are set, quorum actions - starting or stopping
-brick daemons in response to node or network events - will be automatic.
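-
-As a hedged sketch (volume name hypothetical), these are typically set as follows; the ratio is cluster-wide, so it is commonly set using the special volume name "all":
-
-    gluster volume set myvol cluster.server-quorum-type server
-    gluster volume set all cluster.server-quorum-ratio 51%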
-
-## Best Practices
-
-If a cluster with an even number of nodes is split exactly down the middle,
-neither half can have quorum (which requires **more than** half of the total).
-This is particularly important when N=2, in which case the loss of either node
-leads to loss of quorum. Therefore, it is highly advisable to ensure that the
-cluster size is three or greater. The "extra" node in this case need not have
-any bricks or serve any data. It need only be present to preserve the notion
-of a quorum majority less than the entire cluster membership, allowing the
-cluster to survive the loss of a single node without losing quorum.
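-
-As a minimal sketch (hostname hypothetical), such a quorum-only node can simply be probed into the trusted pool without hosting any bricks:
-
-    gluster peer probe quorum-node.example.com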
-
-
-
diff --git a/doc/features/shard.md b/doc/features/shard.md
deleted file mode 100644
index 3588531a2b4..00000000000
--- a/doc/features/shard.md
+++ /dev/null
@@ -1,68 +0,0 @@
-### Sharding xlator (Stripe 2.0)
-
-GlusterFS's answer to very large files (those which can grow beyond a
-single brick) has never been clear. There is a stripe xlator which allows you to
-do that, but it comes at a cost of flexibility - you can add servers only in
-multiples of stripe-count x replica-count, and mixing striped and unstriped files is
-not possible in an "elegant" way. This also happens to be a big limiting factor
-for the big data/Hadoop use case where super large files are the norm (and where
-you want to split a file even if it could fit within a single server).
-
-The proposed solution for this is to replace the current stripe xlator with a
-new Shard xlator. Unlike the stripe xlator, Shard is not a cluster xlator. It is
-placed on top of DHT. Initially all files will be created as normal files, up
-to a certain configurable size. The first block (default 4MB) will be stored
-like a normal file. However, further blocks will be stored in files named by
-the GFID and block index in a separate namespace (like /.shard/GFID1.1,
-/.shard/GFID1.2 ... /.shard/GFID1.N). File IO happening to a particular offset
-will write to the appropriate "piece file", creating it if necessary. The
-aggregated file size and block count will be stored in the xattr of the original
-(first block) file.
-
-The advantage of such a model:
-
-- Data blocks are distributed by DHT in a "normal way".
-- Adding servers can happen in any number (even one at a time) and DHT's
- rebalance will spread out the "piece files" evenly.
-- Self-healing of a large file is now more distributed into smaller files across
- more servers.
-- piece file naming scheme is immune to renames and hardlinks.
-
-Source: https://gist.github.com/avati/af04f1030dcf52e16535#sharding-xlator-stripe-20
-
-## Usage:
-
-Shard translator is disabled by default. To enable it on a given volume, execute
-<code>
-gluster volume set <VOLNAME> features.shard on
-</code>
-
-The default shard block size is 4MB. To modify it, execute
-<code>
-gluster volume set <VOLNAME> features.shard-block-size <value>
-</code>
-
-When a file is created in a volume with sharding enabled, its block size is
-persisted in its xattr on the first block. This property of the file will remain
-even if the shard-block-size for the volume is reconfigured later.
-
-If you want to disable sharding on a volume, it is advisable to create a new
-volume without sharding and copy out contents of this volume into the new
-volume.
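-
-A hedged sketch of that copy-out approach, assuming hypothetical volume names oldvol and newvol and a client that mounts both (brick paths are placeholders too):
-
-    # create and start a new, unsharded volume
-    gluster volume create newvol server1:/bricks/newvol
-    gluster volume start newvol
-
-    # mount both volumes and copy the data across
-    mount -t glusterfs server1:/oldvol /mnt/oldvol
-    mount -t glusterfs server1:/newvol /mnt/newvol
-    cp -a /mnt/oldvol/. /mnt/newvol/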
-
-## Note:
-* Shard translator is still a beta feature in 3.7.0 and will possibly be fully
-  supported in one of the 3.7.x releases.
-* It is advisable to use shard translator in volumes with replication enabled
- for fault tolerance.
-
-## TO-DO:
-* Complete implementation of zerofill, discard and fallocate fops.
-* Introduce caching and its invalidation within shard translator to store size
- and block count of shard'ed files.
-* Make shard translator work for non-Hadoop and non-VM use cases where there are
- multiple clients operating on the same file.
-* Serialize appending writes.
-* Manage recovery of size and block count better in the face of faults during
- ongoing inode write fops.
-* Anything else that could crop up later :)
diff --git a/doc/features/tier/tier.md b/doc/features/tier/tier.md
deleted file mode 100644
index 13e7d971bdf..00000000000
--- a/doc/features/tier/tier.md
+++ /dev/null
@@ -1,168 +0,0 @@
-##Tiering
-
-* ####Feature page:
-http://www.gluster.org/community/documentation/index.php/Features/data-classification
-
-* #####Design: goo.gl/bkU5qv
-
-###Theory of operation
-
-
-The tiering feature enables different storage types to be used by the same
-logical volume. In Gluster 3.7, the two types are classified as "cold" and
-"hot", and are represented as two groups of bricks. The hot group acts as
-a cache for the cold group. The bricks within the two groups themselves are
-arranged according to standard Gluster volume conventions, e.g. replicated,
-distributed replicated, or dispersed.
-
-A normal gluster volume can become a tiered volume by "attaching" bricks
-to it. The attached bricks become the "hot" group. The bricks within the
-original gluster volume are the "cold" bricks.
-
-For example, the original volume may be dispersed on HDD, and the "hot"
-tier could be distributed-replicated SSDs.
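-
-For illustration, in Gluster 3.7 a hot tier is typically attached (and later detached) with commands along these lines; the volume and brick names are hypothetical and exact syntax may differ between releases:
-
-    gluster volume attach-tier myvol replica 2 ssd1:/bricks/hot ssd2:/bricks/hot
-    gluster volume detach-tier myvol start
-    gluster volume detach-tier myvol commit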
-
-Once this new "tiered" volume is built, I/Os to it are subjected to caching
-heuristics:
-
-* All I/Os are forwarded to the hot tier.
-
-* If a lookup fails to the hot tier, the I/O will be forwarded to the cold
-tier. This is a "cache miss".
-
-* Files on the hot tier that are not touched within some time are demoted
-(moved) to the cold tier (see performance parameters, below).
-
-* Files on the cold tier that are touched one or more times are promoted
-(moved) to the hot tier. (see performance parameters, below).
-
-This resembles implementations by Ceph and the Linux data management (DM)
-component.
-
-Performance enhancements being considered include:
-
-* Biasing migration of large files over small.
-
-* Only demoting when the hot tier is close to full.
-
-* Write-back cache for database updates.
-
-###Code organization
-
-The design endeavors to be upward compatible with future migration policies,
-such as scheduled file migration, data classification, etc. For example,
-the caching logic is self-contained and separate from the file migration. A
-different set of migration policies could use the same underlying migration
-engine. The I/O tracking and metadata store components are intended to be
-reusable for things besides caching semantics.
-
-####Libgfdb:
-
-Libgfdb provides an abstract mechanism to record extra/rich metadata
-required for data maintenance, such as data tiering/classification.
-It provides the consumer with an API for recording and querying, keeping
-the consumer abstracted from the data store used beneath for storing data.
-It works in a plug-and-play model, where data stores can be plugged in.
-Presently we have a plugin for Sqlite3. In the future it will provide a recording
-and querying performance optimizer. In the current implementation the schema
-of the metadata is fixed.
-
-####Schema:
-
- GF_FILE_TB Table:
- This table has one entry per file inode. It holds the metadata required to
- make decisions in data maintenance.
- GF_ID (Primary key) : File GFID (Universal Unique IDentifier in the namespace)
- W_SEC, W_MSEC : Write wind time in sec & micro-sec
- UW_SEC, UW_MSEC : Write un-wind time in sec & micro-sec
- W_READ_SEC, W_READ_MSEC : Read wind time in sec & micro-sec
- UW_READ_SEC, UW_READ_MSEC : Read un-wind time in sec & micro-sec
- WRITE_FREQ_CNTR INTEGER : Write Frequency Counter
- READ_FREQ_CNTR INTEGER : Read Frequency Counter
-
- GF_FLINK_TABLE:
- This table has all the hardlinks to a file inode.
- GF_ID : File GFID (Composite Primary Key)``|
- GF_PID : Parent Directory GFID (Composite Primary Key) |-> Primary Key
- FNAME : File Base Name (Composite Primary Key)__|
-    FPATH : File Full Path (It's redundant for now; this will go)
-    W_DEL_FLAG : This flag is used for crash consistency when a link is unlinked,
-    i.e. set to 1 during the unlink wind; during the unwind this record is deleted.
-    LINK_UPDATE : This flag is used when a link is changed, i.e. renamed.
-    Set to 1 during the rename wind and set to 0 during the rename unwind.
-
-Libgfdb API :
-Refer libglusterfs/src/gfdb/gfdb_data_store.h
-
-####ChangeTimeRecorder (CTR) Translator:
-
-ChangeTimeRecorder (CTR) is a server-side xlator (translator) which sits
-just above the posix xlator. The main role of this xlator is to record the
-access/write patterns on a file residing on the brick. It records the
-read (only data) and write (data and metadata) times and also counts
-how many times a file is read or written. This xlator also captures
-the hard links to a file (as they are required by data tiering to move
-files).
-
-CTR Xlator is the consumer of libgfdb.
-
-To Enable/Disable CTR Xlator:
-
- **gluster volume set <volume-name> features.ctr-enabled {on/off}**
-
-To Enable/Disable Frequency Counter Recording in CTR Xlator:
-
- **gluster volume set <volume-name> features.record-counters {on/off}**
-
-
-####Migration daemon:
-
-When a tiered volume is created, a migration daemon starts. There is one daemon
-for every tiered volume per node. The daemon sleeps and then periodically
-queries the database for files to promote or demote. The query callback
-assembles files into a list, which is then enumerated. The frequency at
-which promotions and demotions happen is subject to user configuration.
-
-Selected files are migrated between the tiers using existing DHT migration
-logic. The tier translator will leverage DHT rebalance performance
-enhancements.
-
-Configurable options for the migration daemon:
-
- gluster volume set <volume-name> cluster.tier-demote-frequency <SECS>
-
- gluster volume set <volume-name> cluster.tier-promote-frequency <SECS>
-
- gluster volume set <volume-name> cluster.read-freq-threshold <SECS>
-
- gluster volume set <volume-name> cluster.write-freq-threshold <SECS>
-
-
-####Tier Translator:
-
-The tier translator is the root node in tiered volumes. The first subvolume
-is the cold tier, and the second the hot tier. DHT logic for forwarding I/Os
-is largely unchanged. Exceptions are handled according to the dht_methods_t
-structure, which forks control according to DHT or tier type.
-
-The major exception is that DHT's layout is not utilized for choosing hashed
-subvolumes. Rather, the hot tier is always the hashed subvolume.
-
-Changes to DHT were made to allow "stacking", i.e. DHT over DHT:
-
-* readdir operations remember the index of the "leaf node" in the volume graph
-(client id), rather than a unique index for each DHT instance.
-
-* Each DHT instance uses a unique extended attribute for tracking migration.
-
-* In certain cases, it is legal for tiered volumes to have unpopulated inodes
-(whereas this would be an error in DHT's case).
-
-Currently tiered volume expansion (adding and removing bricks) is unsupported.
-
-####glusterd:
-
-The tiered volume tree is a composition of two other volumes. The glusterd
-daemon builds it. Existing logic for adding and removing bricks is heavily
-leveraged to attach and detach tiers, and perform statistics collection.
-
diff --git a/doc/features/trash.md b/doc/features/trash.md
deleted file mode 100644
index 3e38e872cf7..00000000000
--- a/doc/features/trash.md
+++ /dev/null
@@ -1,80 +0,0 @@
-Trash Translator
-=================
-
-The trash translator allows users to access deleted or truncated files. Every brick will maintain a hidden .trashcan directory, which will be used to store the files deleted or truncated from the respective brick. The aggregate of all those .trashcan directories can be accessed from the mount point. In order to avoid name collisions, a timestamp is appended to the original file name while it is being moved to the trash directory.
-
-##Implications and Usage
-Apart from the primary use-case of accessing files deleted or truncated by the user, the trash translator can be helpful for internal operations such as self-heal and rebalance. During self-heal and rebalance it is possible to lose crucial data. In those circumstances the trash translator can assist in recovery of the lost data. The trash translator is designed to intercept unlink, truncate and ftruncate fops, store a copy of the current file in the trash directory, and then perform the fop on the original file. For the internal operations, the files are stored under the 'internal_op' folder inside the trash directory.
-
-##Volume Options
-1. *gluster volume set &lt;VOLNAME> features.trash &lt;on | off>*
-
- This command can be used to enable trash translator in a volume. If set to on, trash directory will be created in every brick inside the volume during volume start command. By default translator is loaded during volume start but remains non-functional. Disabling trash with the help of this option will not remove the trash directory or even its contents from the volume.
-
-2. *gluster volume set &lt;VOLNAME> features.trash-dir &lt;name>*
-
- This command is used to reconfigure the trash directory to a user specified name. The argument is a valid directory name. Directory will be created inside every brick under this name. If not specified by the user, the trash translator will create the trash directory with the default name “.trashcan”. This can be used only when trash-translator is on.
-
-3. *gluster volume set &lt;VOLNAME> features.trash-max-filesize &lt;size>*
-
-    This command can be used to filter files entering the trash directory based on their size. Files above trash-max-filesize are deleted/truncated directly. The value for size may be followed by the multiplicative suffixes KB (=1024), MB (=1024*1024) and GB (=1024*1024*1024). The default size is set to 5MB. As of now any value specified higher than 1GB will be capped at 1GB.
-
-4. *gluster volume set &lt;VOLNAME> features.trash-eliminate-path &lt;path1> [ , &lt;path2> , . . . ]*
-
- This command can be used to set the eliminate pattern for the trash translator. Files residing under this pattern will not be moved to trash directory during deletion/truncation. Path must be a valid one present in volume.
-
-5. *gluster volume set &lt;VOLNAME> features.trash-internal-op &lt;on | off>*
-
- This command can be used to enable trash for internal operations like self-heal and re-balance. By default set to off.
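-
-A hedged example of setting these options together on a hypothetical volume named test (the values are illustrative only):
-
-    gluster volume set test features.trash on
-    gluster volume set test features.trash-dir .mytrash
-    gluster volume set test features.trash-max-filesize 100MB
-    gluster volume set test features.trash-eliminate-path /tmp
-    gluster volume set test features.trash-internal-op on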
-
-##Testing
-The following steps illustrate a simple scenario of deleting a file from a directory.
-
-1. Create a volume and start it.
-
- gluster volume create test rhs:/home/brick
-
- gluster volume start test
-
-2. Enable trash translator
-
-    gluster volume set test features.trash on
-
-3. Mount glusterfs client as follows.
-
- mount -t glusterfs rhs:test /mnt
-
-4. Create a directory and file in the mount.
-
-    mkdir /mnt/dir
-
-    echo abc > /mnt/dir/file
-
-5. Delete the file from the mount.
-
-    rm -f /mnt/dir/file
-
-6. Check inside the trash directory.
-
-    ls /mnt/.trashcan
-
-We can find the deleted file inside the trash directory with a timestamp appended to its filename.
-
-For example,
-
- [root@rh-host ~]#mount -t glusterfs rh-host:/test /mnt/test
- [root@rh-host ~]#mkdir /mnt/test/abc
- [root@rh-host ~]#touch /mnt/test/abc/file
-    [root@rh-host ~]#rm /mnt/test/abc/file
- remove regular empty file ‘/mnt/test/abc/file’? y
- [root@rh-host ~]#ls /mnt/test/abc
- [root@rh-host ~]#
- [root@rh-host ~]#ls /mnt/test/.trashcan/abc/
- file2014-08-21_123400
-
-##Points to be remembered
-[1] As soon as the volume is started, the trash directory will be created inside the volume and will be visible through the mount. Disabling trash will not have any impact on its visibility from the mount.
-[2] Even though deletion of the trash directory is not permitted, its current contents will be removed when a delete is issued on it, leaving only an empty trash directory.
-
-##Known issues
-[1] Since the trash translator resides on the server side, the DHT translator is unaware of the rename and truncate operations being done by this translator, which eventually move the files to the trash directory. Unless and until a complete-path-based lookup comes on trashed files, those may not be visible from the mount.
diff --git a/doc/features/upcall.md b/doc/features/upcall.md
deleted file mode 100644
index 894bd54264d..00000000000
--- a/doc/features/upcall.md
+++ /dev/null
@@ -1,33 +0,0 @@
-## Upcall ##
-
-### Summary ###
-A generic and extensible framework, used to maintain state in the glusterfsd process for each of the files accessed (including the info of the clients performing the fops) and send notifications to the respective glusterfs clients in case of any change in that state.
-
-A few of the use-cases currently using this infrastructure are:
-
- Inode Update/Invalidation
-
-### Detailed Description ###
-GlusterFS, a scale-out storage platform, comprises a distributed file system which follows a client-server architectural model.
-
-It is the client (glusterfs) which usually initiates an RPC request to the server (glusterfsd). After processing the request, a reply is sent to the client as a response to that request. Until now, there was no interface or use-case for the server to notify or make a request to the client.
-
-This support is now being added using the “Upcall Infrastructure”.
-
-A new xlator (Upcall) has been defined to maintain and process the state of events which require the server to send upcall notifications. For each I/O on an inode, we create/update an ‘upcall_inode_ctx’ and store/update the list of clients’ info ‘upcall_client_t’ in the context.
-
-#### Cache Invalidation ####
-Each of the GlusterFS clients/applications caches certain state of the files (e.g., inode or attributes). In a multi-node environment these caches could lead to data-integrity issues, for a certain time, if there are multiple clients accessing the same file simultaneously.
-To avoid such scenarios, we need the server to notify clients in case of any change in the file state/attributes.
-
-More details can be found in the below links -
- http://www.gluster.org/community/documentation/index.php/Features/Upcall-infrastructure
- https://soumyakoduri.wordpress.com/2015/02/25/glusterfs-understanding-upcall-infrastructure-and-cache-invalidation-support/
-
-cache-invalidation is currently disabled by default. It can be enabled with the following command:
-
- gluster volume set <volname> features.cache-invalidation on
-
-Note: This upcall notification is sent only to those clients which have accessed the file recently (i.e., within CACHE_INVALIDATE_PERIOD, default 60 sec). This option can be tuned using the following command:
-
- gluster volume set <volname> features.cache-invalidation-timeout <value>
diff --git a/doc/features/worm.md b/doc/features/worm.md
deleted file mode 100644
index dba99777da5..00000000000
--- a/doc/features/worm.md
+++ /dev/null
@@ -1,75 +0,0 @@
-#WORM (Write Once Read Many)
-This feature enables you to create a `WORM volume` using the gluster CLI.
-##Description
-WORM (write once, read many) is a desired feature for users who want to store data such as `log files` and where data is not allowed to be modified.
-
-GlusterFS provides a new key `features.worm` which takes boolean values (enable/disable) for volume set.
-
-Internally, the volume set command with the 'features.worm' key will add the 'features/worm' translator in the brick's volume file.
-
-`This change would be reflected on a subsequent restart of the volume`, i.e. gluster volume stop, followed by a gluster volume start.
-
-With a volume converted to WORM, the changes are as follows:
-
-* Reads are handled normally.
-* Only files with the O_APPEND flag will be supported.
-* Truncation and deletion won't be supported.
-
-##Volume Options
-Use the volume set command on a volume with the following key to turn it into a WORM-type volume.
-
- # features.worm enable
-##Fully loaded Example
-The WORM feature is supported from glusterfs version 3.4.
-Start glusterd by using the command
-
- # service glusterd start
-Now create a volume by using the command
-
- # gluster volume create <vol_name> <brick_path>
-Start the volume by running the command below.
-
- # gluster vol start <vol_name>
-Run the command below to make sure that the volume has been created.
-
- # gluster volume info
-Now turn on the WORM feature on the volume by using the command
-
-    # gluster vol set <vol_name> features.worm enable
-Verify that the option is set by using the command
-
- # gluster volume info
-The user should be able to see another option in the volume info
-
- # features.worm: enable
-Now restart the volume for the changes to take effect, by performing a volume stop and start.
-
-    # gluster volume stop <vol_name>
-    # gluster volume start <vol_name>
-Now mount the volume using fuse mount
-
-    # mount -t glusterfs <hostname>:<vol_name> <mnt_point>
-Create a file inside the mount point by running the command below.
-
- # touch <file_name>
-Verify that the user is able to create the file by running the command below.
-
- # ls <file_name>
-
-##How To Test
-Now try deleting the file which has been created above.
-
- # rm <file_name>
-Since WORM is enabled on the volume, it gives the following error message `rm: cannot remove '/<mnt_point>/<file_name>': Read-only file system`
-
-Put some content into the file created above.
-
- # echo "at the end of the file" >> <file_name>
-Now try editing the file by running the command below and verify that it fails with a 'Read-only file system' error.
-
- # sed -i "1iAt the beginning of the file" <file_name>
-Now read the contents of the file and verify that the file can be read.
-
-    # cat <file_name>
-
-`Note: If the WORM option is set on the volume before it is started, then the volume need not be restarted for the changes to take effect`.
diff --git a/doc/features/zerofill.md b/doc/features/zerofill.md
deleted file mode 100644
index c0f1fc5014c..00000000000
--- a/doc/features/zerofill.md
+++ /dev/null
@@ -1,26 +0,0 @@
-#zerofill API for GlusterFS
-The zerofill() API allows creation of pre-allocated and zeroed-out files on GlusterFS volumes by offloading the zeroing part to the server and/or storage (storage offloads use SCSI WRITESAME).
-## Description
-
-Zerofill writes zeroes to a file in the specified range. This fop will be useful when a whole file needs to be initialized with zero (could be useful for zero filled VM disk image provisioning or during scrubbing of VM disk images).
-
-The client/application can issue this FOP for zeroing out. The Gluster server will zero out the required range of bytes, i.e. server-offloaded zeroing. In the absence of this fop, the client/application has to repetitively issue write (zero) fops to the server, which is a very inefficient method because of the overheads involved in RPC calls and acknowledgements.
-
-WRITESAME is a SCSI T10 command that takes a block of data as input and writes the same data to other blocks; this write is handled completely within the storage and hence is known as an offload. Linux now has support for the SCSI WRITESAME command, which is exposed to the user in the form of the BLKZEROOUT ioctl. The BD xlator can exploit the BLKZEROOUT ioctl to implement this fop. Thus zeroing-out operations can be completely offloaded to the storage device,
-making it highly efficient.
-
-The fop takes two arguments, offset and size. It zeroes out 'size' bytes in an opened file starting from the 'offset' position.
-This feature adds zerofill support to the following areas:
-- libglusterfs
-- io-stats
-- performance/md-cache,open-behind
-- quota
-- cluster/afr,dht,stripe
-- rpc/xdr
-- protocol/client,server
-- io-threads
-- marker
-- storage/posix
-- libgfapi
-
-Client applications can exploit this fop by using glfs_zerofill introduced in libgfapi. FUSE support for this fop has not been added as there is no system call for this fop.
diff --git a/doc/legacy/Makefile.am b/doc/legacy/Makefile.am
deleted file mode 100644
index b2caabaa2f3..00000000000
--- a/doc/legacy/Makefile.am
+++ /dev/null
@@ -1,3 +0,0 @@
-info_TEXINFOS = user-guide.texi
-CLEANFILES = *~
-DISTCLEANFILES = .deps/*.P *.info *vti
diff --git a/doc/legacy/advanced-stripe.odg b/doc/legacy/advanced-stripe.odg
deleted file mode 100644
index 7686d7091b2..00000000000
--- a/doc/legacy/advanced-stripe.odg
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/advanced-stripe.pdf b/doc/legacy/advanced-stripe.pdf
deleted file mode 100644
index ec8b03dcfbb..00000000000
--- a/doc/legacy/advanced-stripe.pdf
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/authentication.txt b/doc/legacy/authentication.txt
deleted file mode 100644
index 036a9df9908..00000000000
--- a/doc/legacy/authentication.txt
+++ /dev/null
@@ -1,112 +0,0 @@
-
-* Authentication is provided by two modules, addr and login. Login-based authentication uses a username/password from the client for authentication. Each module returns either ACCEPT, REJECT or DONT_CARE. DONT_CARE is returned if the input authentication information is not relevant to the module's working. The theory behind authentication is that "none of the auth modules should return REJECT and at least one of them should return ACCEPT"
-
-* Currently all the authentication-related information is passed unencrypted over the network from the client to the server.
-
-----------------------------------------------------------------------------------------------------
-* options provided in protocol/client:
- * for username/password based authentication:
- option username <username>
- option password <password>
- * client can have only one set of username/password
- * for addr based authentication:
- * no options required in protocol/client. Client has to bind to privileged port (port < 1024 ) which means the process in which protocol/client is loaded has to be run as root.
-
-----------------------------------------------------------------------------------------------------
-* options provided in protocol/server:
- * for username/password based authentication:
- option auth.login.<brick>.allow [comma separated list of usernames using which clients can connect to volume <brick>]
- option auth.login.<username>.password <password> #specify password <password> for username <username>
- * for addr based authentication:
- option auth.addr.<brick>.allow [comma separated list of ip-addresses/unix-paths from which clients are allowed to connect to volume <brick>]
- option auth.addr.<brick>.reject [comma separated list of ip-addresses/unix-paths from which clients are not allowed to connect to volume <brick>]
- * negation operator '!' is used to invert the sense of matching.
- Eg., option auth.addr.brick.allow !a.b.c.d #do not allow client from a.b.c.d to connect to volume brick
- option auth.addr.brick.reject !w.x.y.z #allow client from w.x.y.z to connect to volume brick
- * wildcard '*' can be used to match any ip-address/unix-path
-
-----------------------------------------------------------------------------------------------------
-
-* Usecases:
-
-* username/password based authentication only
- protocol/client:
- option username foo
- option password foo-password
- option remote-subvolume foo-brick
-
- protocol/server:
- option auth.login.foo-brick.allow foo,who #,other users allowed to connect to foo-brick
- option auth.login.foo.password foo-password
- option auth.login.who.password who-password
-
-    * in protocol/server, don't specify the ip from which the client is connecting in the auth.addr.foo-brick.reject list
-
-****************************************************************************************************
-
-* ip based authentication only
- protocol/client:
- option remote-subvolume foo-brick
- * Client is connecting from a.b.c.d
-
- protocol/server:
- option auth.addr.foo-brick.allow a.b.c.d,e.f.g.h,i.j.k.l #, other ip addresses from which clients are allowed to connect to foo-brick
-
-****************************************************************************************************
-* ip and username/password based authentication
- * allow only "user foo from a.b.c.d"
- protocol/client:
- option username foo
- option password foo-password
- option remote-subvolume foo-brick
-
- protocol/server:
- option auth.login.foo-brick.allow foo
- option auth.login.foo.password foo-password
- option auth.addr.foo-brick.reject !a.b.c.d
-
- * allow only "user foo" from a.b.c.d i.e., only user foo is allowed from a.b.c.d, but anyone is allowed from ip addresses other than a.b.c.d
- protocol/client:
- option username foo
- option password foo-password
- option remote-subvolume foo-brick
-
- protocol/server:
- option auth.login.foo-brick.allow foo
- option auth.login.foo.password foo-password
- option auth.addr.foo-brick.allow !a.b.c.d
-
- * reject only "user shoo from a.b.c.d"
-    protocol/client:
- option remote-subvolume shoo-brick
-
- protocol/server:
- # observe that no "option auth.login.shoo-brick.allow shoo" given
- # Also other users from a.b.c.d have to be explicitly allowed using auth.login.shoo-brick.allow ...
- option auth.addr.shoo-brick.allow !a.b.c.d
-
- * reject only "user shoo" from a.b.c.d i.e., user shoo from a.b.c.d has to be rejected.
- * same as reject only "user shoo from a.b.c.d" above, but rules have to be added whether to allow ip addresses (and users from those ips) other than a.b.c.d
-
-****************************************************************************************************
-
-* ip or username/password based authentication
-
- * allow user foo or clients from a.b.c.d
- protocol/client:
- option remote-subvolume foo-brick
-
- protocol/server:
- option auth.login.foo-brick.allow foo
- option auth.login.foo.password foo-password
- option auth.addr.foo-brick.allow a.b.c.d
-
- * reject user shoo or clients from a.b.c.d
- protocol/client:
- option remote-subvolume shoo-brick
-
- protocol/server:
- option auth.login.shoo-brick.allow <usernames other than shoo>
- #for each username mentioned in the above <usernames other than shoo> list, specify password as below
- option auth.login.<username other than shoo>.password password
- option auth.addr.shoo-brick.reject a.b.c.d
diff --git a/doc/legacy/booster.txt b/doc/legacy/booster.txt
deleted file mode 100644
index 051401a28fc..00000000000
--- a/doc/legacy/booster.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-Introduction
-============
-* booster is an LD_PRELOADable library which boosts read/write performance by bypassing fuse for
- read() and write() calls.
-
-Requirements
-============
-* fetch volfile from glusterfs.
-* identify whether multiple files are from the same mount point. If so, use only one context.
-
-Design
-======
-* for a getxattr, along with other attributes, fuse returns the following attributes.
- * contents of client volume-file.
- * mount point.
-
-* LD_PRELOADed booster.so maintains a hash table storing mount-points and libglusterfsclient handles
- so that handles are reused for files from same mount point.
-
-* it also maintains an fdtable. The fdtable maps the fd (integer) returned to the application to the fd (pointer to an fd struct)
-  used by libglusterfsclient. The application is returned the same fd as the one returned from the libc apis.
-
-* During fork, these tables are overwritten to enable creation of a fresh glusterfs context in the child.
-
-Working
-=======
-* an application willing to use booster LD_PRELOADs booster.so, which is a wrapper library implementing
-  open, read and write (see the example invocation after this list).
-
-* the application should specify the path to the logfile through the environment variable GLFS_BOOSTER_LOGFILE. If
-  not specified, logging is done to /dev/stderr.
-
-* open call does,
- * real_open on the file.
- * fgetxattr(fd).
-  * store the volume-file content obtained in the dictionary to a temporary file.
- * look in the hashtable for the mount-point, if already present get the libglusterfsclient handle from the
- hashtable. Otherwise get a new handle from libglusterfsclient (be careful about mount point not present in
-    the hashtable and multiple glusterfs_inits running simultaneously for the same mount-point, thereby using
- multiple handles for the same mount point).
- * real_close (fd).
-  * delete the temporary volume-file.
- * glusterfs_open (handle, path, mode).
- * store the fd returned by glusterfs_open in the fdtable at the same index as the fd returned by real_open.
- * return the index as fd.
-
-* read/write calls do,
- * get the libglusterfsclient fd from fdtable.
- * if found use glusterfs_read/glusterfs_write, else use real_read/real_write.
-
-* close call does,
- * remove the fd from the fdtable.
-
-* other calls use real_calls.
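-
-Putting the above together, a hypothetical invocation (the path to booster.so is an assumption and depends on the install prefix) might look like:
-
-    GLFS_BOOSTER_LOGFILE=/tmp/booster.log \
-    LD_PRELOAD=/usr/lib/glusterfs/booster.so \
-    ./my_application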
diff --git a/doc/legacy/colonO-icon.jpg b/doc/legacy/colonO-icon.jpg
deleted file mode 100644
index 3e66f7a2775..00000000000
--- a/doc/legacy/colonO-icon.jpg
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/errno.list.bsd.txt b/doc/legacy/errno.list.bsd.txt
deleted file mode 100644
index 350af25e4ab..00000000000
--- a/doc/legacy/errno.list.bsd.txt
+++ /dev/null
@@ -1,376 +0,0 @@
-/*-
- * Copyright (c) 1982, 1986, 1989, 1993
- * The Regents of the University of California. All rights reserved.
- * (c) UNIX System Laboratories, Inc.
- * All or some portions of this file are derived from material licensed
- * to the University of California by American Telephone and Telegraph
- * Co. or Unix System Laboratories, Inc. and are reproduced herein with
- * the permission of UNIX System Laboratories, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)errno.h 8.5 (Berkeley) 1/21/94
- * $FreeBSD: src/sys/sys/errno.h,v 1.28 2005/04/02 12:33:28 das Exp $
- */
-
-#ifndef _SYS_ERRNO_H_
-#define _SYS_ERRNO_H_
-
-#ifndef _KERNEL
-#include <sys/cdefs.h>
-__BEGIN_DECLS
-int * __error(void);
-__END_DECLS
-#define errno (* __error())
-#endif
-
-#define EPERM 1 /* Operation not permitted */
-#define ENOENT 2 /* No such file or directory */
-#define ESRCH 3 /* No such process */
-#define EINTR 4 /* Interrupted system call */
-#define EIO 5 /* Input/output error */
-#define ENXIO 6 /* Device not configured */
-#define E2BIG 7 /* Argument list too long */
-#define ENOEXEC 8 /* Exec format error */
-#define EBADF 9 /* Bad file descriptor */
-#define ECHILD 10 /* No child processes */
-#define EDEADLK 11 /* Resource deadlock avoided */
- /* 11 was EAGAIN */
-#define ENOMEM 12 /* Cannot allocate memory */
-#define EACCES 13 /* Permission denied */
-#define EFAULT 14 /* Bad address */
-#ifndef _POSIX_SOURCE
-#define ENOTBLK 15 /* Block device required */
-#endif
-#define EBUSY 16 /* Device busy */
-#define EEXIST 17 /* File exists */
-#define EXDEV 18 /* Cross-device link */
-#define ENODEV 19 /* Operation not supported by device */
-#define ENOTDIR 20 /* Not a directory */
-#define EISDIR 21 /* Is a directory */
-#define EINVAL 22 /* Invalid argument */
-#define ENFILE 23 /* Too many open files in system */
-#define EMFILE 24 /* Too many open files */
-#define ENOTTY 25 /* Inappropriate ioctl for device */
-#ifndef _POSIX_SOURCE
-#define ETXTBSY 26 /* Text file busy */
-#endif
-#define EFBIG 27 /* File too large */
-#define ENOSPC 28 /* No space left on device */
-#define ESPIPE 29 /* Illegal seek */
-#define EROFS 30 /* Read-only filesystem */
-#define EMLINK 31 /* Too many links */
-#define EPIPE 32 /* Broken pipe */
-
-/* math software */
-#define EDOM 33 /* Numerical argument out of domain */
-#define ERANGE 34 /* Result too large */
-
-/* non-blocking and interrupt i/o */
-#define EAGAIN 35 /* Resource temporarily unavailable */
-#ifndef _POSIX_SOURCE
-#define EWOULDBLOCK EAGAIN /* Operation would block */
-#define EINPROGRESS 36 /* Operation now in progress */
-#define EALREADY 37 /* Operation already in progress */
-
-/* ipc/network software -- argument errors */
-#define ENOTSOCK 38 /* Socket operation on non-socket */
-#define EDESTADDRREQ 39 /* Destination address required */
-#define EMSGSIZE 40 /* Message too long */
-#define EPROTOTYPE 41 /* Protocol wrong type for socket */
-#define ENOPROTOOPT 42 /* Protocol not available */
-#define EPROTONOSUPPORT 43 /* Protocol not supported */
-#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
-#define EOPNOTSUPP 45 /* Operation not supported */
-#define ENOTSUP EOPNOTSUPP /* Operation not supported */
-#define EPFNOSUPPORT 46 /* Protocol family not supported */
-#define EAFNOSUPPORT 47 /* Address family not supported by protocol family */
-#define EADDRINUSE 48 /* Address already in use */
-#define EADDRNOTAVAIL 49 /* Can't assign requested address */
-
-/* ipc/network software -- operational errors */
-#define ENETDOWN 50 /* Network is down */
-#define ENETUNREACH 51 /* Network is unreachable */
-#define ENETRESET 52 /* Network dropped connection on reset */
-#define ECONNABORTED 53 /* Software caused connection abort */
-#define ECONNRESET 54 /* Connection reset by peer */
-#define ENOBUFS 55 /* No buffer space available */
-#define EISCONN 56 /* Socket is already connected */
-#define ENOTCONN 57 /* Socket is not connected */
-#define ESHUTDOWN 58 /* Can't send after socket shutdown */
-#define ETOOMANYREFS 59 /* Too many references: can't splice */
-#define ETIMEDOUT 60 /* Operation timed out */
-#define ECONNREFUSED 61 /* Connection refused */
-
-#define ELOOP 62 /* Too many levels of symbolic links */
-#endif /* _POSIX_SOURCE */
-#define ENAMETOOLONG 63 /* File name too long */
-
-/* should be rearranged */
-#ifndef _POSIX_SOURCE
-#define EHOSTDOWN 64 /* Host is down */
-#define EHOSTUNREACH 65 /* No route to host */
-#endif /* _POSIX_SOURCE */
-#define ENOTEMPTY 66 /* Directory not empty */
-
-/* quotas & mush */
-#ifndef _POSIX_SOURCE
-#define EPROCLIM 67 /* Too many processes */
-#define EUSERS 68 /* Too many users */
-#define EDQUOT 69 /* Disc quota exceeded */
-
-/* Network File System */
-#define ESTALE 70 /* Stale NFS file handle */
-#define EREMOTE 71 /* Too many levels of remote in path */
-#define EBADRPC 72 /* RPC struct is bad */
-#define ERPCMISMATCH 73 /* RPC version wrong */
-#define EPROGUNAVAIL 74 /* RPC prog. not avail */
-#define EPROGMISMATCH 75 /* Program version wrong */
-#define EPROCUNAVAIL 76 /* Bad procedure for program */
-#endif /* _POSIX_SOURCE */
-
-#define ENOLCK 77 /* No locks available */
-#define ENOSYS 78 /* Function not implemented */
-
-#ifndef _POSIX_SOURCE
-#define EFTYPE 79 /* Inappropriate file type or format */
-#define EAUTH 80 /* Authentication error */
-#define ENEEDAUTH 81 /* Need authenticator */
-#define EIDRM 82 /* Identifier removed */
-#define ENOMSG 83 /* No message of desired type */
-#define EOVERFLOW 84 /* Value too large to be stored in data type */
-#define ECANCELED 85 /* Operation canceled */
-#define EILSEQ 86 /* Illegal byte sequence */
-#define ENOATTR 87 /* Attribute not found */
-
-#define EDOOFUS 88 /* Programming error */
-#endif /* _POSIX_SOURCE */
-
-#define EBADMSG 89 /* Bad message */
-#define EMULTIHOP 90 /* Multihop attempted */
-#define ENOLINK 91 /* Link has been severed */
-#define EPROTO 92 /* Protocol error */
-
-#ifndef _POSIX_SOURCE
-#define ELAST 92 /* Must be equal largest errno */
-#endif /* _POSIX_SOURCE */
-
-#ifdef _KERNEL
-/* pseudo-errors returned inside kernel to modify return to process */
-#define ERESTART (-1) /* restart syscall */
-#define EJUSTRETURN (-2) /* don't modify regs, just return */
-#define ENOIOCTL (-3) /* ioctl not handled by this layer */
-#define EDIRIOCTL (-4) /* do direct ioctl in GEOM */
-#endif
-
-#endif
diff --git a/doc/legacy/errno.list.linux.txt b/doc/legacy/errno.list.linux.txt
deleted file mode 100644
index 3f3b18c46ec..00000000000
--- a/doc/legacy/errno.list.linux.txt
+++ /dev/null
@@ -1,1586 +0,0 @@
-#define ICONV_SUPPORTS_ERRNO 1
-#include <errno.h>
-/* Error constants. Linux specific version.
- Copyright (C) 1996, 1997, 1998, 1999, 2005 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#ifdef _ERRNO_H
-
-# undef EDOM
-# undef EILSEQ
-# undef ERANGE
-# include <linux/errno.h>
-
-/* Linux has no ENOTSUP error code. */
-# define ENOTSUP EOPNOTSUPP
-
-/* Older Linux versions also had no ECANCELED error code. */
-# ifndef ECANCELED
-# define ECANCELED 125
-# endif
-
-/* Support for error codes to support robust mutexes was added later, too. */
-# ifndef EOWNERDEAD
-# define EOWNERDEAD 130
-# define ENOTRECOVERABLE 131
-# endif
-
-# ifndef __ASSEMBLER__
-/* Function to get address of global `errno' variable. */
-extern int *__errno_location (void) __THROW __attribute__ ((__const__));
-
-# if !defined _LIBC || defined _LIBC_REENTRANT
-/* When using threads, errno is a per-thread value. */
-# define errno (*__errno_location ())
-# endif
-# endif /* !__ASSEMBLER__ */
-#endif /* _ERRNO_H */
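A minimal sketch (assuming glibc with POSIX threads; build with -pthread) of the per-thread errno described above: each thread's write goes through its own __errno_location() and never disturbs another thread's value.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    /* The worker writes its own, private errno and prints it. */
    static void *worker(void *arg)
    {
        errno = *(int *)arg;
        printf("worker sees errno = %d\n", errno);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        int einval = 22;            /* EINVAL on Linux */

        errno = 1;                  /* EPERM in the main thread */
        pthread_create(&t, NULL, worker, &einval);
        pthread_join(t, NULL);
        printf("main still sees errno = %d\n", errno);  /* typically still 1 */
        return 0;
    }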
-
-#if !defined _ERRNO_H && defined __need_Emath
-/* This is ugly but the kernel header is not clean enough. We must
- define only the values EDOM, EILSEQ and ERANGE in case __need_Emath is
- defined. */
-# define EDOM 33 /* Math argument out of domain of function. */
-# define EILSEQ 84 /* Illegal byte sequence. */
-# define ERANGE 34 /* Math result not representable. */
-#endif /* !_ERRNO_H && __need_Emath */
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef APR_ERRNO_H
-#define APR_ERRNO_H
-
-/**
- * @file apr_errno.h
- * @brief APR Error Codes
- */
-
-#include "apr.h"
-
-#if APR_HAVE_ERRNO_H
-#include <errno.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/**
- * @defgroup apr_errno Error Codes
- * @ingroup APR
- * @{
- */
-
-/**
- * Type for specifying an error or status code.
- */
-typedef int apr_status_t;
-
-/**
- * Return a human readable string describing the specified error.
- * @param statcode The error code to get a string for.
- * @param buf A buffer to hold the error string.
- * @param bufsize Size of the buffer to hold the string.
- */
-APR_DECLARE(char *) apr_strerror(apr_status_t statcode, char *buf,
- apr_size_t bufsize);
-
-#if defined(DOXYGEN)
-/**
- * @def APR_FROM_OS_ERROR(os_err_type syserr)
- * Fold a platform specific error into an apr_status_t code.
- * @return apr_status_t
- * @param e The platform os error code.
- * @warning macro implementation; the syserr argument may be evaluated
- * multiple times.
- */
-#define APR_FROM_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e + APR_OS_START_SYSERR)
-
-/**
- * @def APR_TO_OS_ERROR(apr_status_t statcode)
- * @return os_err_type
- * Fold an apr_status_t code back to the native platform defined error.
- * @param e The apr_status_t folded platform os error code.
- * @warning macro implementation; the statcode argument may be evaluated
- * multiple times. If the statcode was not created by apr_get_os_error
- * or APR_FROM_OS_ERROR, the results are undefined.
- */
-#define APR_TO_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e - APR_OS_START_SYSERR)
-
-/** @def apr_get_os_error()
- * @return apr_status_t the last platform error, folded into apr_status_t, on most platforms
- * @remark This retrieves errno, or calls a GetLastError() style function, and
- * folds it with APR_FROM_OS_ERROR. Some platforms (such as OS2) have no
- * such mechanism, so this call may be unsupported. Do NOT use this
- * call for socket errors from socket, send, recv etc!
- */
-
-/** @def apr_set_os_error(e)
- * Reset the last platform error, unfolded from an apr_status_t, on some platforms
- * @param e The OS error folded in a prior call to APR_FROM_OS_ERROR()
- * @warning This is a macro implementation; the statcode argument may be evaluated
- * multiple times. If the statcode was not created by apr_get_os_error
- * or APR_FROM_OS_ERROR, the results are undefined. This macro sets
- * errno, or calls a SetLastError() style function, unfolding statcode
- * with APR_TO_OS_ERROR. Some platforms (such as OS2) have no such
- * mechanism, so this call may be unsupported.
- */
-
-/** @def apr_get_netos_error()
- * Return the last socket error, folded into apr_status_t, on all platforms
- * @remark This retrieves errno or calls a GetLastSocketError() style function,
- * and folds it with APR_FROM_OS_ERROR.
- */
-
-/** @def apr_set_netos_error(e)
- * Reset the last socket error, unfolded from an apr_status_t
- * @param e The socket error folded in a prior call to APR_FROM_OS_ERROR()
- * @warning This is a macro implementation; the statcode argument may be evaluated
- * multiple times. If the statcode was not created by apr_get_os_error
- * or APR_FROM_OS_ERROR, the results are undefined. This macro sets
- * errno, or calls a WSASetLastError() style function, unfolding
- * socketcode with APR_TO_OS_ERROR.
- */
-
-#endif /* defined(DOXYGEN) */
-
-/**
- * APR_OS_START_ERROR is where the APR specific error values start.
- */
-#define APR_OS_START_ERROR 20000
-/**
- * APR_OS_ERRSPACE_SIZE is the maximum number of errors you can fit
- * into one of the error/status ranges below -- except for
- * APR_OS_START_USERERR, which see.
- */
-#define APR_OS_ERRSPACE_SIZE 50000
-/**
- * APR_OS_START_STATUS is where the APR specific status codes start.
- */
-#define APR_OS_START_STATUS (APR_OS_START_ERROR + APR_OS_ERRSPACE_SIZE)
-/**
- * APR_OS_START_USERERR are reserved for applications that use APR that
- * layer their own error codes along with APR's. Note that the
- * error immediately following this one is set ten times farther
- * away than usual, so that users of apr have a lot of room in
- * which to declare custom error codes.
- */
-#define APR_OS_START_USERERR (APR_OS_START_STATUS + APR_OS_ERRSPACE_SIZE)
-/**
- * APR_OS_START_USEERR is obsolete, defined for compatibility only.
- * Use APR_OS_START_USERERR instead.
- */
-#define APR_OS_START_USEERR APR_OS_START_USERERR
-/**
- * APR_OS_START_CANONERR is where APR versions of errno values are defined
- * on systems which don't have the corresponding errno.
- */
-#define APR_OS_START_CANONERR (APR_OS_START_USERERR \
- + (APR_OS_ERRSPACE_SIZE * 10))
-/**
- * APR_OS_START_EAIERR folds EAI_ error codes from getaddrinfo() into
- * apr_status_t values.
- */
-#define APR_OS_START_EAIERR (APR_OS_START_CANONERR + APR_OS_ERRSPACE_SIZE)
-/**
- * APR_OS_START_SYSERR folds platform-specific system error values into
- * apr_status_t values.
- */
-#define APR_OS_START_SYSERR (APR_OS_START_EAIERR + APR_OS_ERRSPACE_SIZE)
-
-/** no error. */
-#define APR_SUCCESS 0
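A self-contained sketch of the arithmetic behind these ranges; the constants and folding macros are restated from this header (with added parentheses around the macro argument), so it compiles without APR installed.

    #include <stdio.h>

    #define APR_SUCCESS            0
    #define APR_OS_START_ERROR     20000
    #define APR_OS_ERRSPACE_SIZE   50000
    #define APR_OS_START_STATUS    (APR_OS_START_ERROR + APR_OS_ERRSPACE_SIZE)
    #define APR_OS_START_USERERR   (APR_OS_START_STATUS + APR_OS_ERRSPACE_SIZE)
    #define APR_OS_START_CANONERR  (APR_OS_START_USERERR + (APR_OS_ERRSPACE_SIZE * 10))
    #define APR_OS_START_EAIERR    (APR_OS_START_CANONERR + APR_OS_ERRSPACE_SIZE)
    #define APR_OS_START_SYSERR    (APR_OS_START_EAIERR + APR_OS_ERRSPACE_SIZE)

    #define APR_FROM_OS_ERROR(e)   ((e) == 0 ? APR_SUCCESS : (e) + APR_OS_START_SYSERR)
    #define APR_TO_OS_ERROR(e)     ((e) == 0 ? APR_SUCCESS : (e) - APR_OS_START_SYSERR)

    int main(void)
    {
        printf("APR errors start at   %d\n", APR_OS_START_ERROR);    /* 20000  */
        printf("APR status codes at   %d\n", APR_OS_START_STATUS);   /* 70000  */
        printf("user error codes at   %d\n", APR_OS_START_USERERR);  /* 120000 */
        printf("canonical errnos at   %d\n", APR_OS_START_CANONERR); /* 620000 */
        printf("EAI_* codes at        %d\n", APR_OS_START_EAIERR);   /* 670000 */
        printf("system error codes at %d\n", APR_OS_START_SYSERR);   /* 720000 */

        /* Folding a native error (say 2, ENOENT on many systems) into the
         * SYSERR range and unfolding it again is a simple offset. */
        int folded = APR_FROM_OS_ERROR(2);
        printf("2 folds to %d and back to %d\n", folded, APR_TO_OS_ERROR(folded));
        return 0;
    }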
-
-/**
- * @defgroup APR_Error APR Error Values
- * <PRE>
- * <b>APR ERROR VALUES</b>
- * APR_ENOSTAT APR was unable to perform a stat on the file
- * APR_ENOPOOL APR was not provided a pool with which to allocate memory
- * APR_EBADDATE APR was given an invalid date
- * APR_EINVALSOCK APR was given an invalid socket
- * APR_ENOPROC APR was not given a process structure
- * APR_ENOTIME APR was not given a time structure
- * APR_ENODIR APR was not given a directory structure
- * APR_ENOLOCK APR was not given a lock structure
- * APR_ENOPOLL APR was not given a poll structure
- * APR_ENOSOCKET APR was not given a socket
- * APR_ENOTHREAD APR was not given a thread structure
- * APR_ENOTHDKEY APR was not given a thread key structure
- * APR_ENOSHMAVAIL There is no more shared memory available
- * APR_EDSOOPEN APR was unable to open the dso object. For more
- * information call apr_dso_error().
- * APR_EGENERAL General failure (specific information not available)
- * APR_EBADIP The specified IP address is invalid
- * APR_EBADMASK The specified netmask is invalid
- * APR_ESYMNOTFOUND Could not find the requested symbol
- * </PRE>
- *
- * <PRE>
- * <b>APR STATUS VALUES</b>
- * APR_INCHILD Program is currently executing in the child
- * APR_INPARENT Program is currently executing in the parent
- * APR_DETACH The thread is detached
- * APR_NOTDETACH The thread is not detached
- * APR_CHILD_DONE The child has finished executing
- * APR_CHILD_NOTDONE The child has not finished executing
- * APR_TIMEUP The operation did not finish before the timeout
- * APR_INCOMPLETE The operation was incomplete although some processing
- * was performed and the results are partially valid
- * APR_BADCH Getopt found an option not in the option string
- * APR_BADARG Getopt found an option that is missing an argument
- * and an argument was specified in the option string
- * APR_EOF APR has encountered the end of the file
- * APR_NOTFOUND APR was unable to find the socket in the poll structure
- * APR_ANONYMOUS APR is using anonymous shared memory
- * APR_FILEBASED APR is using a file name as the key to the shared memory
- * APR_KEYBASED APR is using a shared key as the key to the shared memory
- * APR_EINIT Ininitalizer value. If no option has been found, but
- * the status variable requires a value, this should be used
- * APR_ENOTIMPL The APR function has not been implemented on this
- * platform, either because nobody has gotten to it yet,
- * or the function is impossible on this platform.
- * APR_EMISMATCH Two passwords do not match.
- * APR_EABSOLUTE The given path was absolute.
- * APR_ERELATIVE The given path was relative.
- * APR_EINCOMPLETE The given path was neither relative nor absolute.
- * APR_EABOVEROOT The given path was above the root path.
- * APR_EBUSY The given lock was busy.
- * APR_EPROC_UNKNOWN The given process wasn't recognized by APR
- * </PRE>
- * @{
- */
-/** @see APR_STATUS_IS_ENOSTAT */
-#define APR_ENOSTAT (APR_OS_START_ERROR + 1)
-/** @see APR_STATUS_IS_ENOPOOL */
-#define APR_ENOPOOL (APR_OS_START_ERROR + 2)
-/* empty slot: +3 */
-/** @see APR_STATUS_IS_EBADDATE */
-#define APR_EBADDATE (APR_OS_START_ERROR + 4)
-/** @see APR_STATUS_IS_EINVALSOCK */
-#define APR_EINVALSOCK (APR_OS_START_ERROR + 5)
-/** @see APR_STATUS_IS_ENOPROC */
-#define APR_ENOPROC (APR_OS_START_ERROR + 6)
-/** @see APR_STATUS_IS_ENOTIME */
-#define APR_ENOTIME (APR_OS_START_ERROR + 7)
-/** @see APR_STATUS_IS_ENODIR */
-#define APR_ENODIR (APR_OS_START_ERROR + 8)
-/** @see APR_STATUS_IS_ENOLOCK */
-#define APR_ENOLOCK (APR_OS_START_ERROR + 9)
-/** @see APR_STATUS_IS_ENOPOLL */
-#define APR_ENOPOLL (APR_OS_START_ERROR + 10)
-/** @see APR_STATUS_IS_ENOSOCKET */
-#define APR_ENOSOCKET (APR_OS_START_ERROR + 11)
-/** @see APR_STATUS_IS_ENOTHREAD */
-#define APR_ENOTHREAD (APR_OS_START_ERROR + 12)
-/** @see APR_STATUS_IS_ENOTHDKEY */
-#define APR_ENOTHDKEY (APR_OS_START_ERROR + 13)
-/** @see APR_STATUS_IS_EGENERAL */
-#define APR_EGENERAL (APR_OS_START_ERROR + 14)
-/** @see APR_STATUS_IS_ENOSHMAVAIL */
-#define APR_ENOSHMAVAIL (APR_OS_START_ERROR + 15)
-/** @see APR_STATUS_IS_EBADIP */
-#define APR_EBADIP (APR_OS_START_ERROR + 16)
-/** @see APR_STATUS_IS_EBADMASK */
-#define APR_EBADMASK (APR_OS_START_ERROR + 17)
-/* empty slot: +18 */
-/** @see APR_STATUS_IS_EDSOPEN */
-#define APR_EDSOOPEN (APR_OS_START_ERROR + 19)
-/** @see APR_STATUS_IS_EABSOLUTE */
-#define APR_EABSOLUTE (APR_OS_START_ERROR + 20)
-/** @see APR_STATUS_IS_ERELATIVE */
-#define APR_ERELATIVE (APR_OS_START_ERROR + 21)
-/** @see APR_STATUS_IS_EINCOMPLETE */
-#define APR_EINCOMPLETE (APR_OS_START_ERROR + 22)
-/** @see APR_STATUS_IS_EABOVEROOT */
-#define APR_EABOVEROOT (APR_OS_START_ERROR + 23)
-/** @see APR_STATUS_IS_EBADPATH */
-#define APR_EBADPATH (APR_OS_START_ERROR + 24)
-/** @see APR_STATUS_IS_EPATHWILD */
-#define APR_EPATHWILD (APR_OS_START_ERROR + 25)
-/** @see APR_STATUS_IS_ESYMNOTFOUND */
-#define APR_ESYMNOTFOUND (APR_OS_START_ERROR + 26)
-/** @see APR_STATUS_IS_EPROC_UNKNOWN */
-#define APR_EPROC_UNKNOWN (APR_OS_START_ERROR + 27)
-/** @see APR_STATUS_IS_ENOTENOUGHENTROPY */
-#define APR_ENOTENOUGHENTROPY (APR_OS_START_ERROR + 28)
-/** @} */
-
-/**
- * @defgroup APR_STATUS_IS Status Value Tests
- * @warning For any particular error condition, more than one of these tests
- * may match. This is because platform-specific error codes may not
- * always match the semantics of the POSIX codes these tests (and the
- * corresponding APR error codes) are named after. A notable example
- * are the APR_STATUS_IS_ENOENT and APR_STATUS_IS_ENOTDIR tests on
- * Win32 platforms. The programmer should always be aware of this and
- * adjust the order of the tests accordingly.
- * @{
- */
-/**
- * APR was unable to perform a stat on the file
- * @warning always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_ENOSTAT(s) ((s) == APR_ENOSTAT)
-/**
- * APR was not provided a pool with which to allocate memory
- * @warning always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_ENOPOOL(s) ((s) == APR_ENOPOOL)
-/** APR was given an invalid date */
-#define APR_STATUS_IS_EBADDATE(s) ((s) == APR_EBADDATE)
-/** APR was given an invalid socket */
-#define APR_STATUS_IS_EINVALSOCK(s) ((s) == APR_EINVALSOCK)
-/** APR was not given a process structure */
-#define APR_STATUS_IS_ENOPROC(s) ((s) == APR_ENOPROC)
-/** APR was not given a time structure */
-#define APR_STATUS_IS_ENOTIME(s) ((s) == APR_ENOTIME)
-/** APR was not given a directory structure */
-#define APR_STATUS_IS_ENODIR(s) ((s) == APR_ENODIR)
-/** APR was not given a lock structure */
-#define APR_STATUS_IS_ENOLOCK(s) ((s) == APR_ENOLOCK)
-/** APR was not given a poll structure */
-#define APR_STATUS_IS_ENOPOLL(s) ((s) == APR_ENOPOLL)
-/** APR was not given a socket */
-#define APR_STATUS_IS_ENOSOCKET(s) ((s) == APR_ENOSOCKET)
-/** APR was not given a thread structure */
-#define APR_STATUS_IS_ENOTHREAD(s) ((s) == APR_ENOTHREAD)
-/** APR was not given a thread key structure */
-#define APR_STATUS_IS_ENOTHDKEY(s) ((s) == APR_ENOTHDKEY)
-/** Generic Error which can not be put into another spot */
-#define APR_STATUS_IS_EGENERAL(s) ((s) == APR_EGENERAL)
-/** There is no more shared memory available */
-#define APR_STATUS_IS_ENOSHMAVAIL(s) ((s) == APR_ENOSHMAVAIL)
-/** The specified IP address is invalid */
-#define APR_STATUS_IS_EBADIP(s) ((s) == APR_EBADIP)
-/** The specified netmask is invalid */
-#define APR_STATUS_IS_EBADMASK(s) ((s) == APR_EBADMASK)
-/* empty slot: +18 */
-/**
- * APR was unable to open the dso object.
- * For more information call apr_dso_error().
- */
-#if defined(WIN32)
-#define APR_STATUS_IS_EDSOOPEN(s) ((s) == APR_EDSOOPEN \
- || APR_TO_OS_ERROR(s) == ERROR_MOD_NOT_FOUND)
-#else
-#define APR_STATUS_IS_EDSOOPEN(s) ((s) == APR_EDSOOPEN)
-#endif
-/** The given path was absolute. */
-#define APR_STATUS_IS_EABSOLUTE(s) ((s) == APR_EABSOLUTE)
-/** The given path was relative. */
-#define APR_STATUS_IS_ERELATIVE(s) ((s) == APR_ERELATIVE)
-/** The given path was neither relative nor absolute. */
-#define APR_STATUS_IS_EINCOMPLETE(s) ((s) == APR_EINCOMPLETE)
-/** The given path was above the root path. */
-#define APR_STATUS_IS_EABOVEROOT(s) ((s) == APR_EABOVEROOT)
-/** The given path was bad. */
-#define APR_STATUS_IS_EBADPATH(s) ((s) == APR_EBADPATH)
-/** The given path contained wildcards. */
-#define APR_STATUS_IS_EPATHWILD(s) ((s) == APR_EPATHWILD)
-/** Could not find the requested symbol.
- * For more information call apr_dso_error().
- */
-#if defined(WIN32)
-#define APR_STATUS_IS_ESYMNOTFOUND(s) ((s) == APR_ESYMNOTFOUND \
- || APR_TO_OS_ERROR(s) == ERROR_PROC_NOT_FOUND)
-#else
-#define APR_STATUS_IS_ESYMNOTFOUND(s) ((s) == APR_ESYMNOTFOUND)
-#endif
-/** The given process was not recognized by APR. */
-#define APR_STATUS_IS_EPROC_UNKNOWN(s) ((s) == APR_EPROC_UNKNOWN)
-
-/** APR could not gather enough entropy to continue. */
-#define APR_STATUS_IS_ENOTENOUGHENTROPY(s) ((s) == APR_ENOTENOUGHENTROPY)
-
-/** @} */
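A usage sketch of the tests above, assuming the APR 1.x library and headers are available (apr_file_open, apr_pool_create and apr_strerror are standard APR calls); per the ordering warning at the top of this group, the more specific ENOTDIR test runs before ENOENT.

    #include <stdio.h>
    #include <apr_general.h>
    #include <apr_pools.h>
    #include <apr_file_io.h>
    #include <apr_errno.h>

    int main(void)
    {
        apr_pool_t *pool;
        apr_file_t *file;
        apr_status_t rv;
        char errbuf[256];

        apr_initialize();
        apr_pool_create(&pool, NULL);

        rv = apr_file_open(&file, "/no/such/path", APR_FOPEN_READ,
                           APR_OS_DEFAULT, pool);
        if (rv != APR_SUCCESS) {
            if (APR_STATUS_IS_ENOTDIR(rv))          /* more specific: test first */
                puts("a path component is not a directory");
            else if (APR_STATUS_IS_ENOENT(rv))
                puts("no such file or directory");
            else
                printf("open failed: %s\n",
                       apr_strerror(rv, errbuf, sizeof(errbuf)));
        }

        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }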
-
-/**
- * @addtogroup APR_Error
- * @{
- */
-/** @see APR_STATUS_IS_INCHILD */
-#define APR_INCHILD (APR_OS_START_STATUS + 1)
-/** @see APR_STATUS_IS_INPARENT */
-#define APR_INPARENT (APR_OS_START_STATUS + 2)
-/** @see APR_STATUS_IS_DETACH */
-#define APR_DETACH (APR_OS_START_STATUS + 3)
-/** @see APR_STATUS_IS_NOTDETACH */
-#define APR_NOTDETACH (APR_OS_START_STATUS + 4)
-/** @see APR_STATUS_IS_CHILD_DONE */
-#define APR_CHILD_DONE (APR_OS_START_STATUS + 5)
-/** @see APR_STATUS_IS_CHILD_NOTDONE */
-#define APR_CHILD_NOTDONE (APR_OS_START_STATUS + 6)
-/** @see APR_STATUS_IS_TIMEUP */
-#define APR_TIMEUP (APR_OS_START_STATUS + 7)
-/** @see APR_STATUS_IS_INCOMPLETE */
-#define APR_INCOMPLETE (APR_OS_START_STATUS + 8)
-/* empty slot: +9 */
-/* empty slot: +10 */
-/* empty slot: +11 */
-/** @see APR_STATUS_IS_BADCH */
-#define APR_BADCH (APR_OS_START_STATUS + 12)
-/** @see APR_STATUS_IS_BADARG */
-#define APR_BADARG (APR_OS_START_STATUS + 13)
-/** @see APR_STATUS_IS_EOF */
-#define APR_EOF (APR_OS_START_STATUS + 14)
-/** @see APR_STATUS_IS_NOTFOUND */
-#define APR_NOTFOUND (APR_OS_START_STATUS + 15)
-/* empty slot: +16 */
-/* empty slot: +17 */
-/* empty slot: +18 */
-/** @see APR_STATUS_IS_ANONYMOUS */
-#define APR_ANONYMOUS (APR_OS_START_STATUS + 19)
-/** @see APR_STATUS_IS_FILEBASED */
-#define APR_FILEBASED (APR_OS_START_STATUS + 20)
-/** @see APR_STATUS_IS_KEYBASED */
-#define APR_KEYBASED (APR_OS_START_STATUS + 21)
-/** @see APR_STATUS_IS_EINIT */
-#define APR_EINIT (APR_OS_START_STATUS + 22)
-/** @see APR_STATUS_IS_ENOTIMPL */
-#define APR_ENOTIMPL (APR_OS_START_STATUS + 23)
-/** @see APR_STATUS_IS_EMISMATCH */
-#define APR_EMISMATCH (APR_OS_START_STATUS + 24)
-/** @see APR_STATUS_IS_EBUSY */
-#define APR_EBUSY (APR_OS_START_STATUS + 25)
-/** @} */
-
-/**
- * @addtogroup APR_STATUS_IS
- * @{
- */
-/**
- * Program is currently executing in the child
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code */
-#define APR_STATUS_IS_INCHILD(s) ((s) == APR_INCHILD)
-/**
- * Program is currently executing in the parent
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_INPARENT(s) ((s) == APR_INPARENT)
-/**
- * The thread is detached
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_DETACH(s) ((s) == APR_DETACH)
-/**
- * The thread is not detached
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_NOTDETACH(s) ((s) == APR_NOTDETACH)
-/**
- * The child has finished executing
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_CHILD_DONE(s) ((s) == APR_CHILD_DONE)
-/**
- * The child has not finished executing
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_CHILD_NOTDONE(s) ((s) == APR_CHILD_NOTDONE)
-/**
- * The operation did not finish before the timeout
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_TIMEUP(s) ((s) == APR_TIMEUP)
-/**
- * The operation was incomplete although some processing was performed
- * and the results are partially valid.
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_INCOMPLETE(s) ((s) == APR_INCOMPLETE)
-/* empty slot: +9 */
-/* empty slot: +10 */
-/* empty slot: +11 */
-/**
- * Getopt found an option not in the option string
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_BADCH(s) ((s) == APR_BADCH)
-/**
- * Getopt found an option not in the option string and an argument was
- * specified in the option string
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_BADARG(s) ((s) == APR_BADARG)
-/**
- * APR has encountered the end of the file
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_EOF(s) ((s) == APR_EOF)
-/**
- * APR was unable to find the socket in the poll structure
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_NOTFOUND(s) ((s) == APR_NOTFOUND)
-/* empty slot: +16 */
-/* empty slot: +17 */
-/* empty slot: +18 */
-/**
- * APR is using anonymous shared memory
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_ANONYMOUS(s) ((s) == APR_ANONYMOUS)
-/**
- * APR is using a file name as the key to the shared memory
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_FILEBASED(s) ((s) == APR_FILEBASED)
-/**
- * APR is using a shared key as the key to the shared memory
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_KEYBASED(s) ((s) == APR_KEYBASED)
-/**
- * Initializer value. If no option has been found, but
- * the status variable requires a value, this should be used
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_EINIT(s) ((s) == APR_EINIT)
-/**
- * The APR function has not been implemented on this
- * platform, either because nobody has gotten to it yet,
- * or the function is impossible on this platform.
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_ENOTIMPL(s) ((s) == APR_ENOTIMPL)
-/**
- * Two passwords do not match.
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_EMISMATCH(s) ((s) == APR_EMISMATCH)
-/**
- * The given lock was busy
- * @warning always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_EBUSY(s) ((s) == APR_EBUSY)
-
-/** @} */
-
-/**
- * @addtogroup APR_Error APR Error Values
- * @{
- */
-/* APR CANONICAL ERROR VALUES */
-/** @see APR_STATUS_IS_EACCES */
-#ifdef EACCES
-#define APR_EACCES EACCES
-#else
-#define APR_EACCES (APR_OS_START_CANONERR + 1)
-#endif
-
-/** @see APR_STATUS_IS_EXIST */
-#ifdef EEXIST
-#define APR_EEXIST EEXIST
-#else
-#define APR_EEXIST (APR_OS_START_CANONERR + 2)
-#endif
-
-/** @see APR_STATUS_IS_ENAMETOOLONG */
-#ifdef ENAMETOOLONG
-#define APR_ENAMETOOLONG ENAMETOOLONG
-#else
-#define APR_ENAMETOOLONG (APR_OS_START_CANONERR + 3)
-#endif
-
-/** @see APR_STATUS_IS_ENOENT */
-#ifdef ENOENT
-#define APR_ENOENT ENOENT
-#else
-#define APR_ENOENT (APR_OS_START_CANONERR + 4)
-#endif
-
-/** @see APR_STATUS_IS_ENOTDIR */
-#ifdef ENOTDIR
-#define APR_ENOTDIR ENOTDIR
-#else
-#define APR_ENOTDIR (APR_OS_START_CANONERR + 5)
-#endif
-
-/** @see APR_STATUS_IS_ENOSPC */
-#ifdef ENOSPC
-#define APR_ENOSPC ENOSPC
-#else
-#define APR_ENOSPC (APR_OS_START_CANONERR + 6)
-#endif
-
-/** @see APR_STATUS_IS_ENOMEM */
-#ifdef ENOMEM
-#define APR_ENOMEM ENOMEM
-#else
-#define APR_ENOMEM (APR_OS_START_CANONERR + 7)
-#endif
-
-/** @see APR_STATUS_IS_EMFILE */
-#ifdef EMFILE
-#define APR_EMFILE EMFILE
-#else
-#define APR_EMFILE (APR_OS_START_CANONERR + 8)
-#endif
-
-/** @see APR_STATUS_IS_ENFILE */
-#ifdef ENFILE
-#define APR_ENFILE ENFILE
-#else
-#define APR_ENFILE (APR_OS_START_CANONERR + 9)
-#endif
-
-/** @see APR_STATUS_IS_EBADF */
-#ifdef EBADF
-#define APR_EBADF EBADF
-#else
-#define APR_EBADF (APR_OS_START_CANONERR + 10)
-#endif
-
-/** @see APR_STATUS_IS_EINVAL */
-#ifdef EINVAL
-#define APR_EINVAL EINVAL
-#else
-#define APR_EINVAL (APR_OS_START_CANONERR + 11)
-#endif
-
-/** @see APR_STATUS_IS_ESPIPE */
-#ifdef ESPIPE
-#define APR_ESPIPE ESPIPE
-#else
-#define APR_ESPIPE (APR_OS_START_CANONERR + 12)
-#endif
-
-/**
- * @see APR_STATUS_IS_EAGAIN
- * @warning use APR_STATUS_IS_EAGAIN instead of just testing this value
- */
-#ifdef EAGAIN
-#define APR_EAGAIN EAGAIN
-#elif defined(EWOULDBLOCK)
-#define APR_EAGAIN EWOULDBLOCK
-#else
-#define APR_EAGAIN (APR_OS_START_CANONERR + 13)
-#endif
-
-/** @see APR_STATUS_IS_EINTR */
-#ifdef EINTR
-#define APR_EINTR EINTR
-#else
-#define APR_EINTR (APR_OS_START_CANONERR + 14)
-#endif
-
-/** @see APR_STATUS_IS_ENOTSOCK */
-#ifdef ENOTSOCK
-#define APR_ENOTSOCK ENOTSOCK
-#else
-#define APR_ENOTSOCK (APR_OS_START_CANONERR + 15)
-#endif
-
-/** @see APR_STATUS_IS_ECONNREFUSED */
-#ifdef ECONNREFUSED
-#define APR_ECONNREFUSED ECONNREFUSED
-#else
-#define APR_ECONNREFUSED (APR_OS_START_CANONERR + 16)
-#endif
-
-/** @see APR_STATUS_IS_EINPROGRESS */
-#ifdef EINPROGRESS
-#define APR_EINPROGRESS EINPROGRESS
-#else
-#define APR_EINPROGRESS (APR_OS_START_CANONERR + 17)
-#endif
-
-/**
- * @see APR_STATUS_IS_ECONNABORTED
- * @warning use APR_STATUS_IS_ECONNABORTED instead of just testing this value
- */
-
-#ifdef ECONNABORTED
-#define APR_ECONNABORTED ECONNABORTED
-#else
-#define APR_ECONNABORTED (APR_OS_START_CANONERR + 18)
-#endif
-
-/** @see APR_STATUS_IS_ECONNRESET */
-#ifdef ECONNRESET
-#define APR_ECONNRESET ECONNRESET
-#else
-#define APR_ECONNRESET (APR_OS_START_CANONERR + 19)
-#endif
-
-/** @see APR_STATUS_IS_ETIMEDOUT
- * @deprecated */
-#ifdef ETIMEDOUT
-#define APR_ETIMEDOUT ETIMEDOUT
-#else
-#define APR_ETIMEDOUT (APR_OS_START_CANONERR + 20)
-#endif
-
-/** @see APR_STATUS_IS_EHOSTUNREACH */
-#ifdef EHOSTUNREACH
-#define APR_EHOSTUNREACH EHOSTUNREACH
-#else
-#define APR_EHOSTUNREACH (APR_OS_START_CANONERR + 21)
-#endif
-
-/** @see APR_STATUS_IS_ENETUNREACH */
-#ifdef ENETUNREACH
-#define APR_ENETUNREACH ENETUNREACH
-#else
-#define APR_ENETUNREACH (APR_OS_START_CANONERR + 22)
-#endif
-
-/** @see APR_STATUS_IS_EFTYPE */
-#ifdef EFTYPE
-#define APR_EFTYPE EFTYPE
-#else
-#define APR_EFTYPE (APR_OS_START_CANONERR + 23)
-#endif
-
-/** @see APR_STATUS_IS_EPIPE */
-#ifdef EPIPE
-#define APR_EPIPE EPIPE
-#else
-#define APR_EPIPE (APR_OS_START_CANONERR + 24)
-#endif
-
-/** @see APR_STATUS_IS_EXDEV */
-#ifdef EXDEV
-#define APR_EXDEV EXDEV
-#else
-#define APR_EXDEV (APR_OS_START_CANONERR + 25)
-#endif
-
-/** @see APR_STATUS_IS_ENOTEMPTY */
-#ifdef ENOTEMPTY
-#define APR_ENOTEMPTY ENOTEMPTY
-#else
-#define APR_ENOTEMPTY (APR_OS_START_CANONERR + 26)
-#endif
-
-/** @} */
-
-#if defined(OS2) && !defined(DOXYGEN)
-
-#define APR_FROM_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e + APR_OS_START_SYSERR)
-#define APR_TO_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e - APR_OS_START_SYSERR)
-
-#define INCL_DOSERRORS
-#define INCL_DOS
-
-/* Leave these undefined.
- * OS2 doesn't rely on the errno concept.
- * The API calls always return result codes which
- * should be filtered through APR_FROM_OS_ERROR().
- *
- * #define apr_get_os_error() (APR_FROM_OS_ERROR(GetLastError()))
- * #define apr_set_os_error(e) (SetLastError(APR_TO_OS_ERROR(e)))
- */
-
-/* A special case, only socket calls require this;
- */
-#define apr_get_netos_error() (APR_FROM_OS_ERROR(errno))
-#define apr_set_netos_error(e) (errno = APR_TO_OS_ERROR(e))
-
-/* And this needs to be greped away for good:
- */
-#define APR_OS2_STATUS(e) (APR_FROM_OS_ERROR(e))
-
-/* These can't sit in a private header, so in spite of the extra size,
- * they need to be made available here.
- */
-#define SOCBASEERR 10000
-#define SOCEPERM (SOCBASEERR+1) /* Not owner */
-#define SOCESRCH (SOCBASEERR+3) /* No such process */
-#define SOCEINTR (SOCBASEERR+4) /* Interrupted system call */
-#define SOCENXIO (SOCBASEERR+6) /* No such device or address */
-#define SOCEBADF (SOCBASEERR+9) /* Bad file number */
-#define SOCEACCES (SOCBASEERR+13) /* Permission denied */
-#define SOCEFAULT (SOCBASEERR+14) /* Bad address */
-#define SOCEINVAL (SOCBASEERR+22) /* Invalid argument */
-#define SOCEMFILE (SOCBASEERR+24) /* Too many open files */
-#define SOCEPIPE (SOCBASEERR+32) /* Broken pipe */
-#define SOCEOS2ERR (SOCBASEERR+100) /* OS/2 Error */
-#define SOCEWOULDBLOCK (SOCBASEERR+35) /* Operation would block */
-#define SOCEINPROGRESS (SOCBASEERR+36) /* Operation now in progress */
-#define SOCEALREADY (SOCBASEERR+37) /* Operation already in progress */
-#define SOCENOTSOCK (SOCBASEERR+38) /* Socket operation on non-socket */
-#define SOCEDESTADDRREQ (SOCBASEERR+39) /* Destination address required */
-#define SOCEMSGSIZE (SOCBASEERR+40) /* Message too long */
-#define SOCEPROTOTYPE (SOCBASEERR+41) /* Protocol wrong type for socket */
-#define SOCENOPROTOOPT (SOCBASEERR+42) /* Protocol not available */
-#define SOCEPROTONOSUPPORT (SOCBASEERR+43) /* Protocol not supported */
-#define SOCESOCKTNOSUPPORT (SOCBASEERR+44) /* Socket type not supported */
-#define SOCEOPNOTSUPP (SOCBASEERR+45) /* Operation not supported on socket */
-#define SOCEPFNOSUPPORT (SOCBASEERR+46) /* Protocol family not supported */
-#define SOCEAFNOSUPPORT (SOCBASEERR+47) /* Address family not supported by protocol family */
-#define SOCEADDRINUSE (SOCBASEERR+48) /* Address already in use */
-#define SOCEADDRNOTAVAIL (SOCBASEERR+49) /* Can't assign requested address */
-#define SOCENETDOWN (SOCBASEERR+50) /* Network is down */
-#define SOCENETUNREACH (SOCBASEERR+51) /* Network is unreachable */
-#define SOCENETRESET (SOCBASEERR+52) /* Network dropped connection on reset */
-#define SOCECONNABORTED (SOCBASEERR+53) /* Software caused connection abort */
-#define SOCECONNRESET (SOCBASEERR+54) /* Connection reset by peer */
-#define SOCENOBUFS (SOCBASEERR+55) /* No buffer space available */
-#define SOCEISCONN (SOCBASEERR+56) /* Socket is already connected */
-#define SOCENOTCONN (SOCBASEERR+57) /* Socket is not connected */
-#define SOCESHUTDOWN (SOCBASEERR+58) /* Can't send after socket shutdown */
-#define SOCETOOMANYREFS (SOCBASEERR+59) /* Too many references: can't splice */
-#define SOCETIMEDOUT (SOCBASEERR+60) /* Connection timed out */
-#define SOCECONNREFUSED (SOCBASEERR+61) /* Connection refused */
-#define SOCELOOP (SOCBASEERR+62) /* Too many levels of symbolic links */
-#define SOCENAMETOOLONG (SOCBASEERR+63) /* File name too long */
-#define SOCEHOSTDOWN (SOCBASEERR+64) /* Host is down */
-#define SOCEHOSTUNREACH (SOCBASEERR+65) /* No route to host */
-#define SOCENOTEMPTY (SOCBASEERR+66) /* Directory not empty */
-
-/* APR CANONICAL ERROR TESTS */
-#define APR_STATUS_IS_EACCES(s) ((s) == APR_EACCES \
- || (s) == APR_OS_START_SYSERR + ERROR_ACCESS_DENIED \
- || (s) == APR_OS_START_SYSERR + ERROR_SHARING_VIOLATION)
-#define APR_STATUS_IS_EEXIST(s) ((s) == APR_EEXIST \
- || (s) == APR_OS_START_SYSERR + ERROR_OPEN_FAILED \
- || (s) == APR_OS_START_SYSERR + ERROR_FILE_EXISTS \
- || (s) == APR_OS_START_SYSERR + ERROR_ALREADY_EXISTS \
- || (s) == APR_OS_START_SYSERR + ERROR_ACCESS_DENIED)
-#define APR_STATUS_IS_ENAMETOOLONG(s) ((s) == APR_ENAMETOOLONG \
- || (s) == APR_OS_START_SYSERR + ERROR_FILENAME_EXCED_RANGE \
- || (s) == APR_OS_START_SYSERR + SOCENAMETOOLONG)
-#define APR_STATUS_IS_ENOENT(s) ((s) == APR_ENOENT \
- || (s) == APR_OS_START_SYSERR + ERROR_FILE_NOT_FOUND \
- || (s) == APR_OS_START_SYSERR + ERROR_PATH_NOT_FOUND \
- || (s) == APR_OS_START_SYSERR + ERROR_NO_MORE_FILES \
- || (s) == APR_OS_START_SYSERR + ERROR_OPEN_FAILED)
-#define APR_STATUS_IS_ENOTDIR(s) ((s) == APR_ENOTDIR)
-#define APR_STATUS_IS_ENOSPC(s) ((s) == APR_ENOSPC \
- || (s) == APR_OS_START_SYSERR + ERROR_DISK_FULL)
-#define APR_STATUS_IS_ENOMEM(s) ((s) == APR_ENOMEM)
-#define APR_STATUS_IS_EMFILE(s) ((s) == APR_EMFILE \
- || (s) == APR_OS_START_SYSERR + ERROR_TOO_MANY_OPEN_FILES)
-#define APR_STATUS_IS_ENFILE(s) ((s) == APR_ENFILE)
-#define APR_STATUS_IS_EBADF(s) ((s) == APR_EBADF \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_HANDLE)
-#define APR_STATUS_IS_EINVAL(s) ((s) == APR_EINVAL \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_PARAMETER \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_FUNCTION)
-#define APR_STATUS_IS_ESPIPE(s) ((s) == APR_ESPIPE \
- || (s) == APR_OS_START_SYSERR + ERROR_NEGATIVE_SEEK)
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN \
- || (s) == APR_OS_START_SYSERR + ERROR_NO_DATA \
- || (s) == APR_OS_START_SYSERR + SOCEWOULDBLOCK \
- || (s) == APR_OS_START_SYSERR + ERROR_LOCK_VIOLATION)
-#define APR_STATUS_IS_EINTR(s) ((s) == APR_EINTR \
- || (s) == APR_OS_START_SYSERR + SOCEINTR)
-#define APR_STATUS_IS_ENOTSOCK(s) ((s) == APR_ENOTSOCK \
- || (s) == APR_OS_START_SYSERR + SOCENOTSOCK)
-#define APR_STATUS_IS_ECONNREFUSED(s) ((s) == APR_ECONNREFUSED \
- || (s) == APR_OS_START_SYSERR + SOCECONNREFUSED)
-#define APR_STATUS_IS_EINPROGRESS(s) ((s) == APR_EINPROGRESS \
- || (s) == APR_OS_START_SYSERR + SOCEINPROGRESS)
-#define APR_STATUS_IS_ECONNABORTED(s) ((s) == APR_ECONNABORTED \
- || (s) == APR_OS_START_SYSERR + SOCECONNABORTED)
-#define APR_STATUS_IS_ECONNRESET(s) ((s) == APR_ECONNRESET \
- || (s) == APR_OS_START_SYSERR + SOCECONNRESET)
-/* XXX deprecated */
-#define APR_STATUS_IS_ETIMEDOUT(s) ((s) == APR_ETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + SOCETIMEDOUT)
-#undef APR_STATUS_IS_TIMEUP
-#define APR_STATUS_IS_TIMEUP(s) ((s) == APR_TIMEUP \
- || (s) == APR_OS_START_SYSERR + SOCETIMEDOUT)
-#define APR_STATUS_IS_EHOSTUNREACH(s) ((s) == APR_EHOSTUNREACH \
- || (s) == APR_OS_START_SYSERR + SOCEHOSTUNREACH)
-#define APR_STATUS_IS_ENETUNREACH(s) ((s) == APR_ENETUNREACH \
- || (s) == APR_OS_START_SYSERR + SOCENETUNREACH)
-#define APR_STATUS_IS_EFTYPE(s) ((s) == APR_EFTYPE)
-#define APR_STATUS_IS_EPIPE(s) ((s) == APR_EPIPE \
- || (s) == APR_OS_START_SYSERR + ERROR_BROKEN_PIPE \
- || (s) == APR_OS_START_SYSERR + SOCEPIPE)
-#define APR_STATUS_IS_EXDEV(s) ((s) == APR_EXDEV \
- || (s) == APR_OS_START_SYSERR + ERROR_NOT_SAME_DEVICE)
-#define APR_STATUS_IS_ENOTEMPTY(s) ((s) == APR_ENOTEMPTY \
- || (s) == APR_OS_START_SYSERR + ERROR_DIR_NOT_EMPTY \
- || (s) == APR_OS_START_SYSERR + ERROR_ACCESS_DENIED)
-
-/*
- Sorry, too tired to wrap this up for OS2... feel free to
- fit the following into their best matches.
-
- { ERROR_NO_SIGNAL_SENT, ESRCH },
- { SOCEALREADY, EALREADY },
- { SOCEDESTADDRREQ, EDESTADDRREQ },
- { SOCEMSGSIZE, EMSGSIZE },
- { SOCEPROTOTYPE, EPROTOTYPE },
- { SOCENOPROTOOPT, ENOPROTOOPT },
- { SOCEPROTONOSUPPORT, EPROTONOSUPPORT },
- { SOCESOCKTNOSUPPORT, ESOCKTNOSUPPORT },
- { SOCEOPNOTSUPP, EOPNOTSUPP },
- { SOCEPFNOSUPPORT, EPFNOSUPPORT },
- { SOCEAFNOSUPPORT, EAFNOSUPPORT },
- { SOCEADDRINUSE, EADDRINUSE },
- { SOCEADDRNOTAVAIL, EADDRNOTAVAIL },
- { SOCENETDOWN, ENETDOWN },
- { SOCENETRESET, ENETRESET },
- { SOCENOBUFS, ENOBUFS },
- { SOCEISCONN, EISCONN },
- { SOCENOTCONN, ENOTCONN },
- { SOCESHUTDOWN, ESHUTDOWN },
- { SOCETOOMANYREFS, ETOOMANYREFS },
- { SOCELOOP, ELOOP },
- { SOCEHOSTDOWN, EHOSTDOWN },
- { SOCENOTEMPTY, ENOTEMPTY },
- { SOCEPIPE, EPIPE }
-*/
-
-#elif defined(WIN32) && !defined(DOXYGEN) /* !defined(OS2) */
-
-#define APR_FROM_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e + APR_OS_START_SYSERR)
-#define APR_TO_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e - APR_OS_START_SYSERR)
-
-#define apr_get_os_error() (APR_FROM_OS_ERROR(GetLastError()))
-#define apr_set_os_error(e) (SetLastError(APR_TO_OS_ERROR(e)))
-
-/* A special case, only socket calls require this:
- */
-#define apr_get_netos_error() (APR_FROM_OS_ERROR(WSAGetLastError()))
-#define apr_set_netos_error(e) (WSASetLastError(APR_TO_OS_ERROR(e)))
-
-/* APR CANONICAL ERROR TESTS */
-#define APR_STATUS_IS_EACCES(s) ((s) == APR_EACCES \
- || (s) == APR_OS_START_SYSERR + ERROR_ACCESS_DENIED \
- || (s) == APR_OS_START_SYSERR + ERROR_CANNOT_MAKE \
- || (s) == APR_OS_START_SYSERR + ERROR_CURRENT_DIRECTORY \
- || (s) == APR_OS_START_SYSERR + ERROR_DRIVE_LOCKED \
- || (s) == APR_OS_START_SYSERR + ERROR_FAIL_I24 \
- || (s) == APR_OS_START_SYSERR + ERROR_LOCK_VIOLATION \
- || (s) == APR_OS_START_SYSERR + ERROR_LOCK_FAILED \
- || (s) == APR_OS_START_SYSERR + ERROR_NOT_LOCKED \
- || (s) == APR_OS_START_SYSERR + ERROR_NETWORK_ACCESS_DENIED \
- || (s) == APR_OS_START_SYSERR + ERROR_SHARING_VIOLATION)
-#define APR_STATUS_IS_EEXIST(s) ((s) == APR_EEXIST \
- || (s) == APR_OS_START_SYSERR + ERROR_FILE_EXISTS \
- || (s) == APR_OS_START_SYSERR + ERROR_ALREADY_EXISTS)
-#define APR_STATUS_IS_ENAMETOOLONG(s) ((s) == APR_ENAMETOOLONG \
- || (s) == APR_OS_START_SYSERR + ERROR_FILENAME_EXCED_RANGE \
- || (s) == APR_OS_START_SYSERR + WSAENAMETOOLONG)
-#define APR_STATUS_IS_ENOENT(s) ((s) == APR_ENOENT \
- || (s) == APR_OS_START_SYSERR + ERROR_FILE_NOT_FOUND \
- || (s) == APR_OS_START_SYSERR + ERROR_PATH_NOT_FOUND \
- || (s) == APR_OS_START_SYSERR + ERROR_OPEN_FAILED \
- || (s) == APR_OS_START_SYSERR + ERROR_NO_MORE_FILES)
-#define APR_STATUS_IS_ENOTDIR(s) ((s) == APR_ENOTDIR \
- || (s) == APR_OS_START_SYSERR + ERROR_PATH_NOT_FOUND \
- || (s) == APR_OS_START_SYSERR + ERROR_BAD_NETPATH \
- || (s) == APR_OS_START_SYSERR + ERROR_BAD_NET_NAME \
- || (s) == APR_OS_START_SYSERR + ERROR_BAD_PATHNAME \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_DRIVE)
-#define APR_STATUS_IS_ENOSPC(s) ((s) == APR_ENOSPC \
- || (s) == APR_OS_START_SYSERR + ERROR_DISK_FULL)
-#define APR_STATUS_IS_ENOMEM(s) ((s) == APR_ENOMEM \
- || (s) == APR_OS_START_SYSERR + ERROR_ARENA_TRASHED \
- || (s) == APR_OS_START_SYSERR + ERROR_NOT_ENOUGH_MEMORY \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_BLOCK \
- || (s) == APR_OS_START_SYSERR + ERROR_NOT_ENOUGH_QUOTA \
- || (s) == APR_OS_START_SYSERR + ERROR_OUTOFMEMORY)
-#define APR_STATUS_IS_EMFILE(s) ((s) == APR_EMFILE \
- || (s) == APR_OS_START_SYSERR + ERROR_TOO_MANY_OPEN_FILES)
-#define APR_STATUS_IS_ENFILE(s) ((s) == APR_ENFILE)
-#define APR_STATUS_IS_EBADF(s) ((s) == APR_EBADF \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_HANDLE \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_TARGET_HANDLE)
-#define APR_STATUS_IS_EINVAL(s) ((s) == APR_EINVAL \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_ACCESS \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_DATA \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_FUNCTION \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_HANDLE \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_PARAMETER \
- || (s) == APR_OS_START_SYSERR + ERROR_NEGATIVE_SEEK)
-#define APR_STATUS_IS_ESPIPE(s) ((s) == APR_ESPIPE \
- || (s) == APR_OS_START_SYSERR + ERROR_SEEK_ON_DEVICE \
- || (s) == APR_OS_START_SYSERR + ERROR_NEGATIVE_SEEK)
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN \
- || (s) == APR_OS_START_SYSERR + ERROR_NO_DATA \
- || (s) == APR_OS_START_SYSERR + ERROR_NO_PROC_SLOTS \
- || (s) == APR_OS_START_SYSERR + ERROR_NESTING_NOT_ALLOWED \
- || (s) == APR_OS_START_SYSERR + ERROR_MAX_THRDS_REACHED \
- || (s) == APR_OS_START_SYSERR + ERROR_LOCK_VIOLATION \
- || (s) == APR_OS_START_SYSERR + WSAEWOULDBLOCK)
-#define APR_STATUS_IS_EINTR(s) ((s) == APR_EINTR \
- || (s) == APR_OS_START_SYSERR + WSAEINTR)
-#define APR_STATUS_IS_ENOTSOCK(s) ((s) == APR_ENOTSOCK \
- || (s) == APR_OS_START_SYSERR + WSAENOTSOCK)
-#define APR_STATUS_IS_ECONNREFUSED(s) ((s) == APR_ECONNREFUSED \
- || (s) == APR_OS_START_SYSERR + WSAECONNREFUSED)
-#define APR_STATUS_IS_EINPROGRESS(s) ((s) == APR_EINPROGRESS \
- || (s) == APR_OS_START_SYSERR + WSAEINPROGRESS)
-#define APR_STATUS_IS_ECONNABORTED(s) ((s) == APR_ECONNABORTED \
- || (s) == APR_OS_START_SYSERR + WSAECONNABORTED)
-#define APR_STATUS_IS_ECONNRESET(s) ((s) == APR_ECONNRESET \
- || (s) == APR_OS_START_SYSERR + ERROR_NETNAME_DELETED \
- || (s) == APR_OS_START_SYSERR + WSAECONNRESET)
-/* XXX deprecated */
-#define APR_STATUS_IS_ETIMEDOUT(s) ((s) == APR_ETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WSAETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WAIT_TIMEOUT)
-#undef APR_STATUS_IS_TIMEUP
-#define APR_STATUS_IS_TIMEUP(s) ((s) == APR_TIMEUP \
- || (s) == APR_OS_START_SYSERR + WSAETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WAIT_TIMEOUT)
-#define APR_STATUS_IS_EHOSTUNREACH(s) ((s) == APR_EHOSTUNREACH \
- || (s) == APR_OS_START_SYSERR + WSAEHOSTUNREACH)
-#define APR_STATUS_IS_ENETUNREACH(s) ((s) == APR_ENETUNREACH \
- || (s) == APR_OS_START_SYSERR + WSAENETUNREACH)
-#define APR_STATUS_IS_EFTYPE(s) ((s) == APR_EFTYPE \
- || (s) == APR_OS_START_SYSERR + ERROR_EXE_MACHINE_TYPE_MISMATCH \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_DLL \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_MODULETYPE \
- || (s) == APR_OS_START_SYSERR + ERROR_BAD_EXE_FORMAT \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_EXE_SIGNATURE \
- || (s) == APR_OS_START_SYSERR + ERROR_FILE_CORRUPT \
- || (s) == APR_OS_START_SYSERR + ERROR_BAD_FORMAT)
-#define APR_STATUS_IS_EPIPE(s) ((s) == APR_EPIPE \
- || (s) == APR_OS_START_SYSERR + ERROR_BROKEN_PIPE)
-#define APR_STATUS_IS_EXDEV(s) ((s) == APR_EXDEV \
- || (s) == APR_OS_START_SYSERR + ERROR_NOT_SAME_DEVICE)
-#define APR_STATUS_IS_ENOTEMPTY(s) ((s) == APR_ENOTEMPTY \
- || (s) == APR_OS_START_SYSERR + ERROR_DIR_NOT_EMPTY)
-
-#elif defined(NETWARE) && defined(USE_WINSOCK) && !defined(DOXYGEN) /* !defined(OS2) && !defined(WIN32) */
-
-#define APR_FROM_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e + APR_OS_START_SYSERR)
-#define APR_TO_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e - APR_OS_START_SYSERR)
-
-#define apr_get_os_error() (errno)
-#define apr_set_os_error(e) (errno = (e))
-
-/* A special case, only socket calls require this: */
-#define apr_get_netos_error() (APR_FROM_OS_ERROR(WSAGetLastError()))
-#define apr_set_netos_error(e) (WSASetLastError(APR_TO_OS_ERROR(e)))
-
-/* APR CANONICAL ERROR TESTS */
-#define APR_STATUS_IS_EACCES(s) ((s) == APR_EACCES)
-#define APR_STATUS_IS_EEXIST(s) ((s) == APR_EEXIST)
-#define APR_STATUS_IS_ENAMETOOLONG(s) ((s) == APR_ENAMETOOLONG)
-#define APR_STATUS_IS_ENOENT(s) ((s) == APR_ENOENT)
-#define APR_STATUS_IS_ENOTDIR(s) ((s) == APR_ENOTDIR)
-#define APR_STATUS_IS_ENOSPC(s) ((s) == APR_ENOSPC)
-#define APR_STATUS_IS_ENOMEM(s) ((s) == APR_ENOMEM)
-#define APR_STATUS_IS_EMFILE(s) ((s) == APR_EMFILE)
-#define APR_STATUS_IS_ENFILE(s) ((s) == APR_ENFILE)
-#define APR_STATUS_IS_EBADF(s) ((s) == APR_EBADF)
-#define APR_STATUS_IS_EINVAL(s) ((s) == APR_EINVAL)
-#define APR_STATUS_IS_ESPIPE(s) ((s) == APR_ESPIPE)
-
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN \
- || (s) == EWOULDBLOCK \
- || (s) == APR_OS_START_SYSERR + WSAEWOULDBLOCK)
-#define APR_STATUS_IS_EINTR(s) ((s) == APR_EINTR \
- || (s) == APR_OS_START_SYSERR + WSAEINTR)
-#define APR_STATUS_IS_ENOTSOCK(s) ((s) == APR_ENOTSOCK \
- || (s) == APR_OS_START_SYSERR + WSAENOTSOCK)
-#define APR_STATUS_IS_ECONNREFUSED(s) ((s) == APR_ECONNREFUSED \
- || (s) == APR_OS_START_SYSERR + WSAECONNREFUSED)
-#define APR_STATUS_IS_EINPROGRESS(s) ((s) == APR_EINPROGRESS \
- || (s) == APR_OS_START_SYSERR + WSAEINPROGRESS)
-#define APR_STATUS_IS_ECONNABORTED(s) ((s) == APR_ECONNABORTED \
- || (s) == APR_OS_START_SYSERR + WSAECONNABORTED)
-#define APR_STATUS_IS_ECONNRESET(s) ((s) == APR_ECONNRESET \
- || (s) == APR_OS_START_SYSERR + WSAECONNRESET)
-/* XXX deprecated */
-#define APR_STATUS_IS_ETIMEDOUT(s) ((s) == APR_ETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WSAETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WAIT_TIMEOUT)
-#undef APR_STATUS_IS_TIMEUP
-#define APR_STATUS_IS_TIMEUP(s) ((s) == APR_TIMEUP \
- || (s) == APR_OS_START_SYSERR + WSAETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WAIT_TIMEOUT)
-#define APR_STATUS_IS_EHOSTUNREACH(s) ((s) == APR_EHOSTUNREACH \
- || (s) == APR_OS_START_SYSERR + WSAEHOSTUNREACH)
-#define APR_STATUS_IS_ENETUNREACH(s) ((s) == APR_ENETUNREACH \
- || (s) == APR_OS_START_SYSERR + WSAENETUNREACH)
-#define APR_STATUS_IS_ENETDOWN(s) ((s) == APR_OS_START_SYSERR + WSAENETDOWN)
-#define APR_STATUS_IS_EFTYPE(s) ((s) == APR_EFTYPE)
-#define APR_STATUS_IS_EPIPE(s) ((s) == APR_EPIPE)
-#define APR_STATUS_IS_EXDEV(s) ((s) == APR_EXDEV)
-#define APR_STATUS_IS_ENOTEMPTY(s) ((s) == APR_ENOTEMPTY)
-
-#else /* !defined(NETWARE) && !defined(OS2) && !defined(WIN32) */
-
-/*
- * os error codes are clib error codes
- */
-#define APR_FROM_OS_ERROR(e) (e)
-#define APR_TO_OS_ERROR(e) (e)
-
-#define apr_get_os_error() (errno)
-#define apr_set_os_error(e) (errno = (e))
-
-/* A special case, only socket calls require this:
- */
-#define apr_get_netos_error() (errno)
-#define apr_set_netos_error(e) (errno = (e))
-
-/**
- * @addtogroup APR_STATUS_IS
- * @{
- */
-
-/** permission denied */
-#define APR_STATUS_IS_EACCES(s) ((s) == APR_EACCES)
-/** file exists */
-#define APR_STATUS_IS_EEXIST(s) ((s) == APR_EEXIST)
-/** path name is too long */
-#define APR_STATUS_IS_ENAMETOOLONG(s) ((s) == APR_ENAMETOOLONG)
-/**
- * no such file or directory
- * @remark
- * EMVSCATLG can be returned by the automounter on z/OS for
- * paths which do not exist.
- */
-#ifdef EMVSCATLG
-#define APR_STATUS_IS_ENOENT(s) ((s) == APR_ENOENT \
- || (s) == EMVSCATLG)
-#else
-#define APR_STATUS_IS_ENOENT(s) ((s) == APR_ENOENT)
-#endif
-/** not a directory */
-#define APR_STATUS_IS_ENOTDIR(s) ((s) == APR_ENOTDIR)
-/** no space left on device */
-#ifdef EDQUOT
-#define APR_STATUS_IS_ENOSPC(s) ((s) == APR_ENOSPC \
- || (s) == EDQUOT)
-#else
-#define APR_STATUS_IS_ENOSPC(s) ((s) == APR_ENOSPC)
-#endif
-/** not enough memory */
-#define APR_STATUS_IS_ENOMEM(s) ((s) == APR_ENOMEM)
-/** too many open files */
-#define APR_STATUS_IS_EMFILE(s) ((s) == APR_EMFILE)
-/** file table overflow */
-#define APR_STATUS_IS_ENFILE(s) ((s) == APR_ENFILE)
-/** bad file # */
-#define APR_STATUS_IS_EBADF(s) ((s) == APR_EBADF)
-/** invalid argument */
-#define APR_STATUS_IS_EINVAL(s) ((s) == APR_EINVAL)
-/** illegal seek */
-#define APR_STATUS_IS_ESPIPE(s) ((s) == APR_ESPIPE)
-
-/** operation would block */
-#if !defined(EWOULDBLOCK) || !defined(EAGAIN)
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN)
-#elif (EWOULDBLOCK == EAGAIN)
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN)
-#else
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN \
- || (s) == EWOULDBLOCK)
-#endif
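A small sketch of why the test above is guarded: on platforms such as Linux where EWOULDBLOCK and EAGAIN share one value, checking both names would be redundant (and duplicate switch cases would not even compile), so the check mirrors the header's #if chain.

    #include <errno.h>
    #include <stdio.h>

    /* Returns 1 if err means "try again later", never testing the same
     * numeric value twice. */
    static int is_retryable(int err)
    {
    #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
        return err == EAGAIN || err == EWOULDBLOCK;
    #else
        return err == EAGAIN;       /* one value covers both names */
    #endif
    }

    int main(void)
    {
        printf("EAGAIN retryable? %d\n", is_retryable(EAGAIN));
        return 0;
    }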
-
-/** interrupted system call */
-#define APR_STATUS_IS_EINTR(s) ((s) == APR_EINTR)
-/** socket operation on a non-socket */
-#define APR_STATUS_IS_ENOTSOCK(s) ((s) == APR_ENOTSOCK)
-/** Connection Refused */
-#define APR_STATUS_IS_ECONNREFUSED(s) ((s) == APR_ECONNREFUSED)
-/** operation now in progress */
-#define APR_STATUS_IS_EINPROGRESS(s) ((s) == APR_EINPROGRESS)
-
-/**
- * Software caused connection abort
- * @remark
- * EPROTO on certain older kernels really means ECONNABORTED, so we need to
- * ignore it for them. See discussion in new-httpd archives nh.9701 & nh.9603
- *
- * There is potentially a bug in Solaris 2.x x<6, and other boxes that
- * implement tcp sockets in userland (i.e. on top of STREAMS). On these
- * systems, EPROTO can actually result in a fatal loop. See PR#981 for
- * example. It's hard to handle both uses of EPROTO.
- */
-#ifdef EPROTO
-#define APR_STATUS_IS_ECONNABORTED(s) ((s) == APR_ECONNABORTED \
- || (s) == EPROTO)
-#else
-#define APR_STATUS_IS_ECONNABORTED(s) ((s) == APR_ECONNABORTED)
-#endif
-
-/** Connection Reset by peer */
-#define APR_STATUS_IS_ECONNRESET(s) ((s) == APR_ECONNRESET)
-/** Operation timed out
- * @deprecated */
-#define APR_STATUS_IS_ETIMEDOUT(s) ((s) == APR_ETIMEDOUT)
-/** no route to host */
-#define APR_STATUS_IS_EHOSTUNREACH(s) ((s) == APR_EHOSTUNREACH)
-/** network is unreachable */
-#define APR_STATUS_IS_ENETUNREACH(s) ((s) == APR_ENETUNREACH)
-/** inappropriate file type or format */
-#define APR_STATUS_IS_EFTYPE(s) ((s) == APR_EFTYPE)
-/** broken pipe */
-#define APR_STATUS_IS_EPIPE(s) ((s) == APR_EPIPE)
-/** cross device link */
-#define APR_STATUS_IS_EXDEV(s) ((s) == APR_EXDEV)
-/** Directory Not Empty */
-#define APR_STATUS_IS_ENOTEMPTY(s) ((s) == APR_ENOTEMPTY || \
- (s) == APR_EEXIST)
-/** @} */
-
-#endif /* !defined(NETWARE) && !defined(OS2) && !defined(WIN32) */
-
-/** @} */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* ! APR_ERRNO_H */
-#ifndef _LINUX_ERRNO_H
-#define _LINUX_ERRNO_H
-
-#include <asm/errno.h>
-
-#ifdef __KERNEL__
-
-/* Should never be seen by user programs */
-#define ERESTARTSYS 512
-#define ERESTARTNOINTR 513
-#define ERESTARTNOHAND 514 /* restart if no handler.. */
-#define ENOIOCTLCMD 515 /* No ioctl command */
-#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
-
-/* Defined for the NFSv3 protocol */
-#define EBADHANDLE 521 /* Illegal NFS file handle */
-#define ENOTSYNC 522 /* Update synchronization mismatch */
-#define EBADCOOKIE 523 /* Cookie is stale */
-#define ENOTSUPP 524 /* Operation is not supported */
-#define ETOOSMALL 525 /* Buffer or request is too small */
-#define ESERVERFAULT 526 /* An untranslatable error occurred */
-#define EBADTYPE 527 /* Type not supported by server */
-#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
-#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
-#define EIOCBRETRY 530 /* iocb queued, will trigger a retry */
-
-#endif
-
-#endif
-// Copyright (c) 1994 James Clark
-// See the file COPYING for copying permission.
-
-#ifndef ErrnoMessageArg_INCLUDED
-#define ErrnoMessageArg_INCLUDED 1
-
-#include "MessageArg.h"
-#include "rtti.h"
-
-#ifdef SP_NAMESPACE
-namespace SP_NAMESPACE {
-#endif
-
-class SP_API ErrnoMessageArg : public OtherMessageArg {
- RTTI_CLASS
-public:
- ErrnoMessageArg(int errnum) : errno_(errnum) { }
- MessageArg *copy() const;
- // errno might be a macro so we must use a different name
- int errnum() const;
-private:
- int errno_;
-};
-
-inline
-int ErrnoMessageArg::errnum() const
-{
- return errno_;
-}
-
-#ifdef SP_NAMESPACE
-}
-#endif
-
-#endif /* not ErrnoMessageArg_INCLUDED */
-/* Copyright (C) 1991,92,93,94,95,96,97,2002 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-/*
- * ISO C99 Standard: 7.5 Errors <errno.h>
- */
-
-#ifndef _ERRNO_H
-
-/* The includer defined __need_Emath if he wants only the definitions
- of EDOM and ERANGE, and not everything else. */
-#ifndef __need_Emath
-# define _ERRNO_H 1
-# include <features.h>
-#endif
-
-__BEGIN_DECLS
-
-/* Get the error number constants from the system-specific file.
- This file will test __need_Emath and _ERRNO_H. */
-#include <bits/errno.h>
-#undef __need_Emath
-
-#ifdef _ERRNO_H
-
-/* Declare the `errno' variable, unless it's defined as a macro by
- bits/errno.h. This is the case in GNU, where it is a per-thread
- variable. This redeclaration using the macro still works, but it
- will be a function declaration without a prototype and may trigger
- a -Wstrict-prototypes warning. */
-#ifndef errno
-extern int errno;
-#endif
-
-#ifdef __USE_GNU
-
-/* The full and simple forms of the name with which the program was
- invoked. These variables are set up automatically at startup based on
- the value of ARGV[0] (this works only if you use GNU ld). */
-extern char *program_invocation_name, *program_invocation_short_name;
-#endif /* __USE_GNU */
-#endif /* _ERRNO_H */
-
-__END_DECLS
-
-#endif /* _ERRNO_H */
-
-/* The Hurd <bits/errno.h> defines `error_t' as an enumerated type so
- that printing `error_t' values in the debugger shows the names. We
- might need this definition sometimes even if this file was included
- before. */
-#if defined __USE_GNU || defined __need_error_t
-# ifndef __error_t_defined
-typedef int error_t;
-# define __error_t_defined 1
-# endif
-# undef __need_error_t
-#endif
-#ifndef _I386_ERRNO_H
-#define _I386_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif
-#ifndef _ASM_GENERIC_ERRNO_BASE_H
-#define _ASM_GENERIC_ERRNO_BASE_H
-
-#define EPERM 1 /* Operation not permitted */
-#define ENOENT 2 /* No such file or directory */
-#define ESRCH 3 /* No such process */
-#define EINTR 4 /* Interrupted system call */
-#define EIO 5 /* I/O error */
-#define ENXIO 6 /* No such device or address */
-#define E2BIG 7 /* Argument list too long */
-#define ENOEXEC 8 /* Exec format error */
-#define EBADF 9 /* Bad file number */
-#define ECHILD 10 /* No child processes */
-#define EAGAIN 11 /* Try again */
-#define ENOMEM 12 /* Out of memory */
-#define EACCES 13 /* Permission denied */
-#define EFAULT 14 /* Bad address */
-#define ENOTBLK 15 /* Block device required */
-#define EBUSY 16 /* Device or resource busy */
-#define EEXIST 17 /* File exists */
-#define EXDEV 18 /* Cross-device link */
-#define ENODEV 19 /* No such device */
-#define ENOTDIR 20 /* Not a directory */
-#define EISDIR 21 /* Is a directory */
-#define EINVAL 22 /* Invalid argument */
-#define ENFILE 23 /* File table overflow */
-#define EMFILE 24 /* Too many open files */
-#define ENOTTY 25 /* Not a typewriter */
-#define ETXTBSY 26 /* Text file busy */
-#define EFBIG 27 /* File too large */
-#define ENOSPC 28 /* No space left on device */
-#define ESPIPE 29 /* Illegal seek */
-#define EROFS 30 /* Read-only file system */
-#define EMLINK 31 /* Too many links */
-#define EPIPE 32 /* Broken pipe */
-#define EDOM 33 /* Math argument out of domain of func */
-#define ERANGE 34 /* Math result not representable */
-
-#endif
-#ifndef _ASM_GENERIC_ERRNO_H
-#define _ASM_GENERIC_ERRNO_H
-
-#include <asm-generic/errno-base.h>
-
-#define EDEADLK 35 /* Resource deadlock would occur */
-#define ENAMETOOLONG 36 /* File name too long */
-#define ENOLCK 37 /* No record locks available */
-#define ENOSYS 38 /* Function not implemented */
-#define ENOTEMPTY 39 /* Directory not empty */
-#define ELOOP 40 /* Too many symbolic links encountered */
-#define EWOULDBLOCK EAGAIN /* Operation would block */
-#define ENOMSG 42 /* No message of desired type */
-#define EIDRM 43 /* Identifier removed */
-#define ECHRNG 44 /* Channel number out of range */
-#define EL2NSYNC 45 /* Level 2 not synchronized */
-#define EL3HLT 46 /* Level 3 halted */
-#define EL3RST 47 /* Level 3 reset */
-#define ELNRNG 48 /* Link number out of range */
-#define EUNATCH 49 /* Protocol driver not attached */
-#define ENOCSI 50 /* No CSI structure available */
-#define EL2HLT 51 /* Level 2 halted */
-#define EBADE 52 /* Invalid exchange */
-#define EBADR 53 /* Invalid request descriptor */
-#define EXFULL 54 /* Exchange full */
-#define ENOANO 55 /* No anode */
-#define EBADRQC 56 /* Invalid request code */
-#define EBADSLT 57 /* Invalid slot */
-
-#define EDEADLOCK EDEADLK
-
-#define EBFONT 59 /* Bad font file format */
-#define ENOSTR 60 /* Device not a stream */
-#define ENODATA 61 /* No data available */
-#define ETIME 62 /* Timer expired */
-#define ENOSR 63 /* Out of streams resources */
-#define ENONET 64 /* Machine is not on the network */
-#define ENOPKG 65 /* Package not installed */
-#define EREMOTE 66 /* Object is remote */
-#define ENOLINK 67 /* Link has been severed */
-#define EADV 68 /* Advertise error */
-#define ESRMNT 69 /* Srmount error */
-#define ECOMM 70 /* Communication error on send */
-#define EPROTO 71 /* Protocol error */
-#define EMULTIHOP 72 /* Multihop attempted */
-#define EDOTDOT 73 /* RFS specific error */
-#define EBADMSG 74 /* Not a data message */
-#define EOVERFLOW 75 /* Value too large for defined data type */
-#define ENOTUNIQ 76 /* Name not unique on network */
-#define EBADFD 77 /* File descriptor in bad state */
-#define EREMCHG 78 /* Remote address changed */
-#define ELIBACC 79 /* Can not access a needed shared library */
-#define ELIBBAD 80 /* Accessing a corrupted shared library */
-#define ELIBSCN 81 /* .lib section in a.out corrupted */
-#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
-#define ELIBEXEC 83 /* Cannot exec a shared library directly */
-#define EILSEQ 84 /* Illegal byte sequence */
-#define ERESTART 85 /* Interrupted system call should be restarted */
-#define ESTRPIPE 86 /* Streams pipe error */
-#define EUSERS 87 /* Too many users */
-#define ENOTSOCK 88 /* Socket operation on non-socket */
-#define EDESTADDRREQ 89 /* Destination address required */
-#define EMSGSIZE 90 /* Message too long */
-#define EPROTOTYPE 91 /* Protocol wrong type for socket */
-#define ENOPROTOOPT 92 /* Protocol not available */
-#define EPROTONOSUPPORT 93 /* Protocol not supported */
-#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
-#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
-#define EPFNOSUPPORT 96 /* Protocol family not supported */
-#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
-#define EADDRINUSE 98 /* Address already in use */
-#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
-#define ENETDOWN 100 /* Network is down */
-#define ENETUNREACH 101 /* Network is unreachable */
-#define ENETRESET 102 /* Network dropped connection because of reset */
-#define ECONNABORTED 103 /* Software caused connection abort */
-#define ECONNRESET 104 /* Connection reset by peer */
-#define ENOBUFS 105 /* No buffer space available */
-#define EISCONN 106 /* Transport endpoint is already connected */
-#define ENOTCONN 107 /* Transport endpoint is not connected */
-#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
-#define ETOOMANYREFS 109 /* Too many references: cannot splice */
-#define ETIMEDOUT 110 /* Connection timed out */
-#define ECONNREFUSED 111 /* Connection refused */
-#define EHOSTDOWN 112 /* Host is down */
-#define EHOSTUNREACH 113 /* No route to host */
-#define EALREADY 114 /* Operation already in progress */
-#define EINPROGRESS 115 /* Operation now in progress */
-#define ESTALE 116 /* Stale NFS file handle */
-#define EUCLEAN 117 /* Structure needs cleaning */
-#define ENOTNAM 118 /* Not a XENIX named type file */
-#define ENAVAIL 119 /* No XENIX semaphores available */
-#define EISNAM 120 /* Is a named type file */
-#define EREMOTEIO 121 /* Remote I/O error */
-#define EDQUOT 122 /* Quota exceeded */
-
-#define ENOMEDIUM 123 /* No medium found */
-#define EMEDIUMTYPE 124 /* Wrong medium type */
-#define ECANCELED 125 /* Operation Canceled */
-#define ENOKEY 126 /* Required key not available */
-#define EKEYEXPIRED 127 /* Key has expired */
-#define EKEYREVOKED 128 /* Key has been revoked */
-#define EKEYREJECTED 129 /* Key was rejected by service */
-
-/* for robust mutexes */
-#define EOWNERDEAD 130 /* Owner died */
-#define ENOTRECOVERABLE 131 /* State not recoverable */
-
-#endif
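
The listing above records the Linux errno table; the file removed next records the corresponding OS X (Darwin) table. The two agree on the classic low numbers but diverge above ERANGE (34) -- for example EAGAIN is 11 on Linux and 35 on Darwin, while EDEADLK is 35 on Linux and 11 on Darwin -- which is exactly the difference these legacy reference listings captured. As a minimal, illustrative C sketch (not taken from either removed file), the following prints a few of the Linux constants with the messages strerror() reports and confirms that EWOULDBLOCK and EDEADLOCK are plain aliases on Linux:

    /* Illustrative only: inspect a few Linux errno constants at run time. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const int codes[] = { EPERM, ENOENT, EAGAIN, EDEADLK, ESTALE, EDQUOT };
        size_t i;

        for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
            printf("%3d  %s\n", codes[i], strerror(codes[i]));

        /* On Linux these are aliases, not distinct codes (see the table above). */
        printf("EWOULDBLOCK == EAGAIN : %d\n", EWOULDBLOCK == EAGAIN);
        printf("EDEADLOCK   == EDEADLK: %d\n", EDEADLOCK == EDEADLK);
        return 0;
    }
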
diff --git a/doc/legacy/errno.list.macosx.txt b/doc/legacy/errno.list.macosx.txt
deleted file mode 100644
index 2dff28cd379..00000000000
--- a/doc/legacy/errno.list.macosx.txt
+++ /dev/null
@@ -1,1513 +0,0 @@
-/* Copyright 2000-2005 The Apache Software Foundation or its licensors, as
- * applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef APR_ERRNO_H
-#define APR_ERRNO_H
-
-/**
- * @file apr_errno.h
- * @brief APR Error Codes
- */
-
-#include "apr.h"
-
-#if APR_HAVE_ERRNO_H
-#include <errno.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/**
- * @defgroup apr_errno Error Codes
- * @ingroup APR
- * @{
- */
-
-/**
- * Type for specifying an error or status code.
- */
-typedef int apr_status_t;
-
-/**
- * Return a human readable string describing the specified error.
- * @param statcode The error code to get a string for.
- * @param buf A buffer to hold the error string.
- * @param bufsize Size of the buffer to hold the string.
- */
-APR_DECLARE(char *) apr_strerror(apr_status_t statcode, char *buf,
- apr_size_t bufsize);
-
-#if defined(DOXYGEN)
-/**
- * @def APR_FROM_OS_ERROR(os_err_type syserr)
- * Fold a platform specific error into an apr_status_t code.
- * @return apr_status_t
- * @param e The platform os error code.
- * @warning macro implementation; the syserr argument may be evaluated
- * multiple times.
- */
-#define APR_FROM_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e + APR_OS_START_SYSERR)
-
-/**
- * @def APR_TO_OS_ERROR(apr_status_t statcode)
- * @return os_err_type
- * Fold an apr_status_t code back to the native platform defined error.
- * @param e The apr_status_t folded platform os error code.
- * @warning macro implementation; the statcode argument may be evaluated
- * multiple times. If the statcode was not created by apr_get_os_error
- * or APR_FROM_OS_ERROR, the results are undefined.
- */
-#define APR_TO_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e - APR_OS_START_SYSERR)
-
-/** @def apr_get_os_error()
- * @return apr_status_t the last platform error, folded into apr_status_t, on most platforms
- * @remark This retrieves errno, or calls a GetLastError() style function, and
- * folds it with APR_FROM_OS_ERROR. Some platforms (such as OS2) have no
- * such mechanism, so this call may be unsupported. Do NOT use this
- * call for socket errors from socket, send, recv etc!
- */
-
-/** @def apr_set_os_error(e)
- * Reset the last platform error, unfolded from an apr_status_t, on some platforms
- * @param e The OS error folded in a prior call to APR_FROM_OS_ERROR()
- * @warning This is a macro implementation; the statcode argument may be evaluated
- * multiple times. If the statcode was not created by apr_get_os_error
- * or APR_FROM_OS_ERROR, the results are undefined. This macro sets
- * errno, or calls a SetLastError() style function, unfolding statcode
- * with APR_TO_OS_ERROR. Some platforms (such as OS2) have no such
- * mechanism, so this call may be unsupported.
- */
-
-/** @def apr_get_netos_error()
- * Return the last socket error, folded into apr_status_t, on all platforms
- * @remark This retrieves errno or calls a GetLastSocketError() style function,
- * and folds it with APR_FROM_OS_ERROR.
- */
-
-/** @def apr_set_netos_error(e)
- * Reset the last socket error, unfolded from an apr_status_t
- * @param e The socket error folded in a prior call to APR_FROM_OS_ERROR()
- * @warning This is a macro implementation; the statcode argument may be evaluated
- * multiple times. If the statcode was not created by apr_get_os_error
- * or APR_FROM_OS_ERROR, the results are undefined. This macro sets
- * errno, or calls a WSASetLastError() style function, unfolding
- * socketcode with APR_TO_OS_ERROR.
- */
-
-#endif /* defined(DOXYGEN) */
-
-/**
- * APR_OS_START_ERROR is where the APR specific error values start.
- */
-#define APR_OS_START_ERROR 20000
-/**
- * APR_OS_ERRSPACE_SIZE is the maximum number of errors you can fit
- * into one of the error/status ranges below -- except for
- * APR_OS_START_USERERR, which see.
- */
-#define APR_OS_ERRSPACE_SIZE 50000
-/**
- * APR_OS_START_STATUS is where the APR specific status codes start.
- */
-#define APR_OS_START_STATUS (APR_OS_START_ERROR + APR_OS_ERRSPACE_SIZE)
-/**
- * APR_OS_START_USERERR are reserved for applications that use APR that
- * layer their own error codes along with APR's. Note that the
- * error immediately following this one is set ten times farther
- * away than usual, so that users of apr have a lot of room in
- * which to declare custom error codes.
- */
-#define APR_OS_START_USERERR (APR_OS_START_STATUS + APR_OS_ERRSPACE_SIZE)
-/**
- * APR_OS_START_USEERR is obsolete, defined for compatibility only.
- * Use APR_OS_START_USERERR instead.
- */
-#define APR_OS_START_USEERR APR_OS_START_USERERR
-/**
- * APR_OS_START_CANONERR is where APR versions of errno values are defined
- * on systems which don't have the corresponding errno.
- */
-#define APR_OS_START_CANONERR (APR_OS_START_USERERR \
- + (APR_OS_ERRSPACE_SIZE * 10))
-/**
- * APR_OS_START_EAIERR folds EAI_ error codes from getaddrinfo() into
- * apr_status_t values.
- */
-#define APR_OS_START_EAIERR (APR_OS_START_CANONERR + APR_OS_ERRSPACE_SIZE)
-/**
- * APR_OS_START_SYSERR folds platform-specific system error values into
- * apr_status_t values.
- */
-#define APR_OS_START_SYSERR (APR_OS_START_EAIERR + APR_OS_ERRSPACE_SIZE)
-
-/** no error. */
-#define APR_SUCCESS 0
-
-/**
- * @defgroup APR_Error APR Error Values
- * <PRE>
- * <b>APR ERROR VALUES</b>
- * APR_ENOSTAT APR was unable to perform a stat on the file
- * APR_ENOPOOL APR was not provided a pool with which to allocate memory
- * APR_EBADDATE APR was given an invalid date
- * APR_EINVALSOCK APR was given an invalid socket
- * APR_ENOPROC APR was not given a process structure
- * APR_ENOTIME APR was not given a time structure
- * APR_ENODIR APR was not given a directory structure
- * APR_ENOLOCK APR was not given a lock structure
- * APR_ENOPOLL APR was not given a poll structure
- * APR_ENOSOCKET APR was not given a socket
- * APR_ENOTHREAD APR was not given a thread structure
- * APR_ENOTHDKEY APR was not given a thread key structure
- * APR_ENOSHMAVAIL There is no more shared memory available
- * APR_EDSOOPEN APR was unable to open the dso object. For more
- * information call apr_dso_error().
- * APR_EGENERAL General failure (specific information not available)
- * APR_EBADIP The specified IP address is invalid
- * APR_EBADMASK The specified netmask is invalid
- * APR_ESYMNOTFOUND Could not find the requested symbol
- * </PRE>
- *
- * <PRE>
- * <b>APR STATUS VALUES</b>
- * APR_INCHILD Program is currently executing in the child
- * APR_INPARENT Program is currently executing in the parent
- * APR_DETACH The thread is detached
- * APR_NOTDETACH The thread is not detached
- * APR_CHILD_DONE The child has finished executing
- * APR_CHILD_NOTDONE The child has not finished executing
- * APR_TIMEUP The operation did not finish before the timeout
- * APR_INCOMPLETE The operation was incomplete although some processing
- * was performed and the results are partially valid
- * APR_BADCH Getopt found an option not in the option string
- * APR_BADARG Getopt found an option that is missing an argument
- * and an argument was specified in the option string
- * APR_EOF APR has encountered the end of the file
- * APR_NOTFOUND APR was unable to find the socket in the poll structure
- * APR_ANONYMOUS APR is using anonymous shared memory
- * APR_FILEBASED APR is using a file name as the key to the shared memory
- * APR_KEYBASED APR is using a shared key as the key to the shared memory
- * APR_EINIT Initializer value. If no option has been found, but
- * the status variable requires a value, this should be used
- * APR_ENOTIMPL The APR function has not been implemented on this
- * platform, either because nobody has gotten to it yet,
- * or the function is impossible on this platform.
- * APR_EMISMATCH Two passwords do not match.
- * APR_EABSOLUTE The given path was absolute.
- * APR_ERELATIVE The given path was relative.
- * APR_EINCOMPLETE The given path was neither relative nor absolute.
- * APR_EABOVEROOT The given path was above the root path.
- * APR_EBUSY The given lock was busy.
- * APR_EPROC_UNKNOWN The given process wasn't recognized by APR
- * </PRE>
- * @{
- */
-/** @see APR_STATUS_IS_ENOSTAT */
-#define APR_ENOSTAT (APR_OS_START_ERROR + 1)
-/** @see APR_STATUS_IS_ENOPOOL */
-#define APR_ENOPOOL (APR_OS_START_ERROR + 2)
-/* empty slot: +3 */
-/** @see APR_STATUS_IS_EBADDATE */
-#define APR_EBADDATE (APR_OS_START_ERROR + 4)
-/** @see APR_STATUS_IS_EINVALSOCK */
-#define APR_EINVALSOCK (APR_OS_START_ERROR + 5)
-/** @see APR_STATUS_IS_ENOPROC */
-#define APR_ENOPROC (APR_OS_START_ERROR + 6)
-/** @see APR_STATUS_IS_ENOTIME */
-#define APR_ENOTIME (APR_OS_START_ERROR + 7)
-/** @see APR_STATUS_IS_ENODIR */
-#define APR_ENODIR (APR_OS_START_ERROR + 8)
-/** @see APR_STATUS_IS_ENOLOCK */
-#define APR_ENOLOCK (APR_OS_START_ERROR + 9)
-/** @see APR_STATUS_IS_ENOPOLL */
-#define APR_ENOPOLL (APR_OS_START_ERROR + 10)
-/** @see APR_STATUS_IS_ENOSOCKET */
-#define APR_ENOSOCKET (APR_OS_START_ERROR + 11)
-/** @see APR_STATUS_IS_ENOTHREAD */
-#define APR_ENOTHREAD (APR_OS_START_ERROR + 12)
-/** @see APR_STATUS_IS_ENOTHDKEY */
-#define APR_ENOTHDKEY (APR_OS_START_ERROR + 13)
-/** @see APR_STATUS_IS_EGENERAL */
-#define APR_EGENERAL (APR_OS_START_ERROR + 14)
-/** @see APR_STATUS_IS_ENOSHMAVAIL */
-#define APR_ENOSHMAVAIL (APR_OS_START_ERROR + 15)
-/** @see APR_STATUS_IS_EBADIP */
-#define APR_EBADIP (APR_OS_START_ERROR + 16)
-/** @see APR_STATUS_IS_EBADMASK */
-#define APR_EBADMASK (APR_OS_START_ERROR + 17)
-/* empty slot: +18 */
-/** @see APR_STATUS_IS_EDSOPEN */
-#define APR_EDSOOPEN (APR_OS_START_ERROR + 19)
-/** @see APR_STATUS_IS_EABSOLUTE */
-#define APR_EABSOLUTE (APR_OS_START_ERROR + 20)
-/** @see APR_STATUS_IS_ERELATIVE */
-#define APR_ERELATIVE (APR_OS_START_ERROR + 21)
-/** @see APR_STATUS_IS_EINCOMPLETE */
-#define APR_EINCOMPLETE (APR_OS_START_ERROR + 22)
-/** @see APR_STATUS_IS_EABOVEROOT */
-#define APR_EABOVEROOT (APR_OS_START_ERROR + 23)
-/** @see APR_STATUS_IS_EBADPATH */
-#define APR_EBADPATH (APR_OS_START_ERROR + 24)
-/** @see APR_STATUS_IS_EPATHWILD */
-#define APR_EPATHWILD (APR_OS_START_ERROR + 25)
-/** @see APR_STATUS_IS_ESYMNOTFOUND */
-#define APR_ESYMNOTFOUND (APR_OS_START_ERROR + 26)
-/** @see APR_STATUS_IS_EPROC_UNKNOWN */
-#define APR_EPROC_UNKNOWN (APR_OS_START_ERROR + 27)
-/** @see APR_STATUS_IS_ENOTENOUGHENTROPY */
-#define APR_ENOTENOUGHENTROPY (APR_OS_START_ERROR + 28)
-/** @} */
-
-/**
- * @defgroup APR_STATUS_IS Status Value Tests
- * @warning For any particular error condition, more than one of these tests
- * may match. This is because platform-specific error codes may not
- * always match the semantics of the POSIX codes these tests (and the
- * corresponding APR error codes) are named after. A notable example
- * are the APR_STATUS_IS_ENOENT and APR_STATUS_IS_ENOTDIR tests on
- * Win32 platforms. The programmer should always be aware of this and
- * adjust the order of the tests accordingly.
- * @{
- */
-/**
- * APR was unable to perform a stat on the file
- * @warning always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_ENOSTAT(s) ((s) == APR_ENOSTAT)
-/**
- * APR was not provided a pool with which to allocate memory
- * @warning always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_ENOPOOL(s) ((s) == APR_ENOPOOL)
-/** APR was given an invalid date */
-#define APR_STATUS_IS_EBADDATE(s) ((s) == APR_EBADDATE)
-/** APR was given an invalid socket */
-#define APR_STATUS_IS_EINVALSOCK(s) ((s) == APR_EINVALSOCK)
-/** APR was not given a process structure */
-#define APR_STATUS_IS_ENOPROC(s) ((s) == APR_ENOPROC)
-/** APR was not given a time structure */
-#define APR_STATUS_IS_ENOTIME(s) ((s) == APR_ENOTIME)
-/** APR was not given a directory structure */
-#define APR_STATUS_IS_ENODIR(s) ((s) == APR_ENODIR)
-/** APR was not given a lock structure */
-#define APR_STATUS_IS_ENOLOCK(s) ((s) == APR_ENOLOCK)
-/** APR was not given a poll structure */
-#define APR_STATUS_IS_ENOPOLL(s) ((s) == APR_ENOPOLL)
-/** APR was not given a socket */
-#define APR_STATUS_IS_ENOSOCKET(s) ((s) == APR_ENOSOCKET)
-/** APR was not given a thread structure */
-#define APR_STATUS_IS_ENOTHREAD(s) ((s) == APR_ENOTHREAD)
-/** APR was not given a thread key structure */
-#define APR_STATUS_IS_ENOTHDKEY(s) ((s) == APR_ENOTHDKEY)
-/** Generic Error which can not be put into another spot */
-#define APR_STATUS_IS_EGENERAL(s) ((s) == APR_EGENERAL)
-/** There is no more shared memory available */
-#define APR_STATUS_IS_ENOSHMAVAIL(s) ((s) == APR_ENOSHMAVAIL)
-/** The specified IP address is invalid */
-#define APR_STATUS_IS_EBADIP(s) ((s) == APR_EBADIP)
-/** The specified netmask is invalid */
-#define APR_STATUS_IS_EBADMASK(s) ((s) == APR_EBADMASK)
-/* empty slot: +18 */
-/**
- * APR was unable to open the dso object.
- * For more information call apr_dso_error().
- */
-#if defined(WIN32)
-#define APR_STATUS_IS_EDSOOPEN(s) ((s) == APR_EDSOOPEN \
- || APR_TO_OS_ERROR(s) == ERROR_MOD_NOT_FOUND)
-#else
-#define APR_STATUS_IS_EDSOOPEN(s) ((s) == APR_EDSOOPEN)
-#endif
-/** The given path was absolute. */
-#define APR_STATUS_IS_EABSOLUTE(s) ((s) == APR_EABSOLUTE)
-/** The given path was relative. */
-#define APR_STATUS_IS_ERELATIVE(s) ((s) == APR_ERELATIVE)
-/** The given path was neither relative nor absolute. */
-#define APR_STATUS_IS_EINCOMPLETE(s) ((s) == APR_EINCOMPLETE)
-/** The given path was above the root path. */
-#define APR_STATUS_IS_EABOVEROOT(s) ((s) == APR_EABOVEROOT)
-/** The given path was bad. */
-#define APR_STATUS_IS_EBADPATH(s) ((s) == APR_EBADPATH)
-/** The given path contained wildcards. */
-#define APR_STATUS_IS_EPATHWILD(s) ((s) == APR_EPATHWILD)
-/** Could not find the requested symbol.
- * For more information call apr_dso_error().
- */
-#if defined(WIN32)
-#define APR_STATUS_IS_ESYMNOTFOUND(s) ((s) == APR_ESYMNOTFOUND \
- || APR_TO_OS_ERROR(s) == ERROR_PROC_NOT_FOUND)
-#else
-#define APR_STATUS_IS_ESYMNOTFOUND(s) ((s) == APR_ESYMNOTFOUND)
-#endif
-/** The given process was not recognized by APR. */
-#define APR_STATUS_IS_EPROC_UNKNOWN(s) ((s) == APR_EPROC_UNKNOWN)
-
-/** APR could not gather enough entropy to continue. */
-#define APR_STATUS_IS_ENOTENOUGHENTROPY(s) ((s) == APR_ENOTENOUGHENTROPY)
-
-/** @} */
-
-/**
- * @addtogroup APR_Error
- * @{
- */
-/** @see APR_STATUS_IS_INCHILD */
-#define APR_INCHILD (APR_OS_START_STATUS + 1)
-/** @see APR_STATUS_IS_INPARENT */
-#define APR_INPARENT (APR_OS_START_STATUS + 2)
-/** @see APR_STATUS_IS_DETACH */
-#define APR_DETACH (APR_OS_START_STATUS + 3)
-/** @see APR_STATUS_IS_NOTDETACH */
-#define APR_NOTDETACH (APR_OS_START_STATUS + 4)
-/** @see APR_STATUS_IS_CHILD_DONE */
-#define APR_CHILD_DONE (APR_OS_START_STATUS + 5)
-/** @see APR_STATUS_IS_CHILD_NOTDONE */
-#define APR_CHILD_NOTDONE (APR_OS_START_STATUS + 6)
-/** @see APR_STATUS_IS_TIMEUP */
-#define APR_TIMEUP (APR_OS_START_STATUS + 7)
-/** @see APR_STATUS_IS_INCOMPLETE */
-#define APR_INCOMPLETE (APR_OS_START_STATUS + 8)
-/* empty slot: +9 */
-/* empty slot: +10 */
-/* empty slot: +11 */
-/** @see APR_STATUS_IS_BADCH */
-#define APR_BADCH (APR_OS_START_STATUS + 12)
-/** @see APR_STATUS_IS_BADARG */
-#define APR_BADARG (APR_OS_START_STATUS + 13)
-/** @see APR_STATUS_IS_EOF */
-#define APR_EOF (APR_OS_START_STATUS + 14)
-/** @see APR_STATUS_IS_NOTFOUND */
-#define APR_NOTFOUND (APR_OS_START_STATUS + 15)
-/* empty slot: +16 */
-/* empty slot: +17 */
-/* empty slot: +18 */
-/** @see APR_STATUS_IS_ANONYMOUS */
-#define APR_ANONYMOUS (APR_OS_START_STATUS + 19)
-/** @see APR_STATUS_IS_FILEBASED */
-#define APR_FILEBASED (APR_OS_START_STATUS + 20)
-/** @see APR_STATUS_IS_KEYBASED */
-#define APR_KEYBASED (APR_OS_START_STATUS + 21)
-/** @see APR_STATUS_IS_EINIT */
-#define APR_EINIT (APR_OS_START_STATUS + 22)
-/** @see APR_STATUS_IS_ENOTIMPL */
-#define APR_ENOTIMPL (APR_OS_START_STATUS + 23)
-/** @see APR_STATUS_IS_EMISMATCH */
-#define APR_EMISMATCH (APR_OS_START_STATUS + 24)
-/** @see APR_STATUS_IS_EBUSY */
-#define APR_EBUSY (APR_OS_START_STATUS + 25)
-/** @} */
-
-/**
- * @addtogroup APR_STATUS_IS
- * @{
- */
-/**
- * Program is currently executing in the child
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code */
-#define APR_STATUS_IS_INCHILD(s) ((s) == APR_INCHILD)
-/**
- * Program is currently executing in the parent
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_INPARENT(s) ((s) == APR_INPARENT)
-/**
- * The thread is detached
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_DETACH(s) ((s) == APR_DETACH)
-/**
- * The thread is not detached
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_NOTDETACH(s) ((s) == APR_NOTDETACH)
-/**
- * The child has finished executing
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_CHILD_DONE(s) ((s) == APR_CHILD_DONE)
-/**
- * The child has not finished executing
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_CHILD_NOTDONE(s) ((s) == APR_CHILD_NOTDONE)
-/**
- * The operation did not finish before the timeout
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_TIMEUP(s) ((s) == APR_TIMEUP)
-/**
- * The operation was incomplete although some processing was performed
- * and the results are partially valid.
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_INCOMPLETE(s) ((s) == APR_INCOMPLETE)
-/* empty slot: +9 */
-/* empty slot: +10 */
-/* empty slot: +11 */
-/**
- * Getopt found an option not in the option string
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_BADCH(s) ((s) == APR_BADCH)
-/**
- * Getopt found an option that is missing an argument and an argument was
- * specified in the option string
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_BADARG(s) ((s) == APR_BADARG)
-/**
- * APR has encountered the end of the file
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_EOF(s) ((s) == APR_EOF)
-/**
- * APR was unable to find the socket in the poll structure
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_NOTFOUND(s) ((s) == APR_NOTFOUND)
-/* empty slot: +16 */
-/* empty slot: +17 */
-/* empty slot: +18 */
-/**
- * APR is using anonymous shared memory
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_ANONYMOUS(s) ((s) == APR_ANONYMOUS)
-/**
- * APR is using a file name as the key to the shared memory
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_FILEBASED(s) ((s) == APR_FILEBASED)
-/**
- * APR is using a shared key as the key to the shared memory
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_KEYBASED(s) ((s) == APR_KEYBASED)
-/**
- * Initializer value. If no option has been found, but
- * the status variable requires a value, this should be used
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_EINIT(s) ((s) == APR_EINIT)
-/**
- * The APR function has not been implemented on this
- * platform, either because nobody has gotten to it yet,
- * or the function is impossible on this platform.
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_ENOTIMPL(s) ((s) == APR_ENOTIMPL)
-/**
- * Two passwords do not match.
- * @warning
- * always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_EMISMATCH(s) ((s) == APR_EMISMATCH)
-/**
- * The given lock was busy
- * @warning always use this test, as platform-specific variances may meet this
- * more than one error code
- */
-#define APR_STATUS_IS_EBUSY(s) ((s) == APR_EBUSY)
-
-/** @} */
-
-/**
- * @addtogroup APR_Error APR Error Values
- * @{
- */
-/* APR CANONICAL ERROR VALUES */
-/** @see APR_STATUS_IS_EACCES */
-#ifdef EACCES
-#define APR_EACCES EACCES
-#else
-#define APR_EACCES (APR_OS_START_CANONERR + 1)
-#endif
-
-/** @see APR_STATUS_IS_EXIST */
-#ifdef EEXIST
-#define APR_EEXIST EEXIST
-#else
-#define APR_EEXIST (APR_OS_START_CANONERR + 2)
-#endif
-
-/** @see APR_STATUS_IS_ENAMETOOLONG */
-#ifdef ENAMETOOLONG
-#define APR_ENAMETOOLONG ENAMETOOLONG
-#else
-#define APR_ENAMETOOLONG (APR_OS_START_CANONERR + 3)
-#endif
-
-/** @see APR_STATUS_IS_ENOENT */
-#ifdef ENOENT
-#define APR_ENOENT ENOENT
-#else
-#define APR_ENOENT (APR_OS_START_CANONERR + 4)
-#endif
-
-/** @see APR_STATUS_IS_ENOTDIR */
-#ifdef ENOTDIR
-#define APR_ENOTDIR ENOTDIR
-#else
-#define APR_ENOTDIR (APR_OS_START_CANONERR + 5)
-#endif
-
-/** @see APR_STATUS_IS_ENOSPC */
-#ifdef ENOSPC
-#define APR_ENOSPC ENOSPC
-#else
-#define APR_ENOSPC (APR_OS_START_CANONERR + 6)
-#endif
-
-/** @see APR_STATUS_IS_ENOMEM */
-#ifdef ENOMEM
-#define APR_ENOMEM ENOMEM
-#else
-#define APR_ENOMEM (APR_OS_START_CANONERR + 7)
-#endif
-
-/** @see APR_STATUS_IS_EMFILE */
-#ifdef EMFILE
-#define APR_EMFILE EMFILE
-#else
-#define APR_EMFILE (APR_OS_START_CANONERR + 8)
-#endif
-
-/** @see APR_STATUS_IS_ENFILE */
-#ifdef ENFILE
-#define APR_ENFILE ENFILE
-#else
-#define APR_ENFILE (APR_OS_START_CANONERR + 9)
-#endif
-
-/** @see APR_STATUS_IS_EBADF */
-#ifdef EBADF
-#define APR_EBADF EBADF
-#else
-#define APR_EBADF (APR_OS_START_CANONERR + 10)
-#endif
-
-/** @see APR_STATUS_IS_EINVAL */
-#ifdef EINVAL
-#define APR_EINVAL EINVAL
-#else
-#define APR_EINVAL (APR_OS_START_CANONERR + 11)
-#endif
-
-/** @see APR_STATUS_IS_ESPIPE */
-#ifdef ESPIPE
-#define APR_ESPIPE ESPIPE
-#else
-#define APR_ESPIPE (APR_OS_START_CANONERR + 12)
-#endif
-
-/**
- * @see APR_STATUS_IS_EAGAIN
- * @warning use APR_STATUS_IS_EAGAIN instead of just testing this value
- */
-#ifdef EAGAIN
-#define APR_EAGAIN EAGAIN
-#elif defined(EWOULDBLOCK)
-#define APR_EAGAIN EWOULDBLOCK
-#else
-#define APR_EAGAIN (APR_OS_START_CANONERR + 13)
-#endif
-
-/** @see APR_STATUS_IS_EINTR */
-#ifdef EINTR
-#define APR_EINTR EINTR
-#else
-#define APR_EINTR (APR_OS_START_CANONERR + 14)
-#endif
-
-/** @see APR_STATUS_IS_ENOTSOCK */
-#ifdef ENOTSOCK
-#define APR_ENOTSOCK ENOTSOCK
-#else
-#define APR_ENOTSOCK (APR_OS_START_CANONERR + 15)
-#endif
-
-/** @see APR_STATUS_IS_ECONNREFUSED */
-#ifdef ECONNREFUSED
-#define APR_ECONNREFUSED ECONNREFUSED
-#else
-#define APR_ECONNREFUSED (APR_OS_START_CANONERR + 16)
-#endif
-
-/** @see APR_STATUS_IS_EINPROGRESS */
-#ifdef EINPROGRESS
-#define APR_EINPROGRESS EINPROGRESS
-#else
-#define APR_EINPROGRESS (APR_OS_START_CANONERR + 17)
-#endif
-
-/**
- * @see APR_STATUS_IS_ECONNABORTED
- * @warning use APR_STATUS_IS_ECONNABORTED instead of just testing this value
- */
-
-#ifdef ECONNABORTED
-#define APR_ECONNABORTED ECONNABORTED
-#else
-#define APR_ECONNABORTED (APR_OS_START_CANONERR + 18)
-#endif
-
-/** @see APR_STATUS_IS_ECONNRESET */
-#ifdef ECONNRESET
-#define APR_ECONNRESET ECONNRESET
-#else
-#define APR_ECONNRESET (APR_OS_START_CANONERR + 19)
-#endif
-
-/** @see APR_STATUS_IS_ETIMEDOUT
- * @deprecated */
-#ifdef ETIMEDOUT
-#define APR_ETIMEDOUT ETIMEDOUT
-#else
-#define APR_ETIMEDOUT (APR_OS_START_CANONERR + 20)
-#endif
-
-/** @see APR_STATUS_IS_EHOSTUNREACH */
-#ifdef EHOSTUNREACH
-#define APR_EHOSTUNREACH EHOSTUNREACH
-#else
-#define APR_EHOSTUNREACH (APR_OS_START_CANONERR + 21)
-#endif
-
-/** @see APR_STATUS_IS_ENETUNREACH */
-#ifdef ENETUNREACH
-#define APR_ENETUNREACH ENETUNREACH
-#else
-#define APR_ENETUNREACH (APR_OS_START_CANONERR + 22)
-#endif
-
-/** @see APR_STATUS_IS_EFTYPE */
-#ifdef EFTYPE
-#define APR_EFTYPE EFTYPE
-#else
-#define APR_EFTYPE (APR_OS_START_CANONERR + 23)
-#endif
-
-/** @see APR_STATUS_IS_EPIPE */
-#ifdef EPIPE
-#define APR_EPIPE EPIPE
-#else
-#define APR_EPIPE (APR_OS_START_CANONERR + 24)
-#endif
-
-/** @see APR_STATUS_IS_EXDEV */
-#ifdef EXDEV
-#define APR_EXDEV EXDEV
-#else
-#define APR_EXDEV (APR_OS_START_CANONERR + 25)
-#endif
-
-/** @see APR_STATUS_IS_ENOTEMPTY */
-#ifdef ENOTEMPTY
-#define APR_ENOTEMPTY ENOTEMPTY
-#else
-#define APR_ENOTEMPTY (APR_OS_START_CANONERR + 26)
-#endif
-
-/** @} */
-
-#if defined(OS2) && !defined(DOXYGEN)
-
-#define APR_FROM_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e + APR_OS_START_SYSERR)
-#define APR_TO_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e - APR_OS_START_SYSERR)
-
-#define INCL_DOSERRORS
-#define INCL_DOS
-
-/* Leave these undefined.
- * OS2 doesn't rely on the errno concept.
- * The API calls always return a result codes which
- * should be filtered through APR_FROM_OS_ERROR().
- *
- * #define apr_get_os_error() (APR_FROM_OS_ERROR(GetLastError()))
- * #define apr_set_os_error(e) (SetLastError(APR_TO_OS_ERROR(e)))
- */
-
-/* A special case, only socket calls require this;
- */
-#define apr_get_netos_error() (APR_FROM_OS_ERROR(errno))
-#define apr_set_netos_error(e) (errno = APR_TO_OS_ERROR(e))
-
-/* And this needs to be grepped away for good:
- */
-#define APR_OS2_STATUS(e) (APR_FROM_OS_ERROR(e))
-
-/* These can't sit in a private header, so in spite of the extra size,
- * they need to be made available here.
- */
-#define SOCBASEERR 10000
-#define SOCEPERM (SOCBASEERR+1) /* Not owner */
-#define SOCESRCH (SOCBASEERR+3) /* No such process */
-#define SOCEINTR (SOCBASEERR+4) /* Interrupted system call */
-#define SOCENXIO (SOCBASEERR+6) /* No such device or address */
-#define SOCEBADF (SOCBASEERR+9) /* Bad file number */
-#define SOCEACCES (SOCBASEERR+13) /* Permission denied */
-#define SOCEFAULT (SOCBASEERR+14) /* Bad address */
-#define SOCEINVAL (SOCBASEERR+22) /* Invalid argument */
-#define SOCEMFILE (SOCBASEERR+24) /* Too many open files */
-#define SOCEPIPE (SOCBASEERR+32) /* Broken pipe */
-#define SOCEOS2ERR (SOCBASEERR+100) /* OS/2 Error */
-#define SOCEWOULDBLOCK (SOCBASEERR+35) /* Operation would block */
-#define SOCEINPROGRESS (SOCBASEERR+36) /* Operation now in progress */
-#define SOCEALREADY (SOCBASEERR+37) /* Operation already in progress */
-#define SOCENOTSOCK (SOCBASEERR+38) /* Socket operation on non-socket */
-#define SOCEDESTADDRREQ (SOCBASEERR+39) /* Destination address required */
-#define SOCEMSGSIZE (SOCBASEERR+40) /* Message too long */
-#define SOCEPROTOTYPE (SOCBASEERR+41) /* Protocol wrong type for socket */
-#define SOCENOPROTOOPT (SOCBASEERR+42) /* Protocol not available */
-#define SOCEPROTONOSUPPORT (SOCBASEERR+43) /* Protocol not supported */
-#define SOCESOCKTNOSUPPORT (SOCBASEERR+44) /* Socket type not supported */
-#define SOCEOPNOTSUPP (SOCBASEERR+45) /* Operation not supported on socket */
-#define SOCEPFNOSUPPORT (SOCBASEERR+46) /* Protocol family not supported */
-#define SOCEAFNOSUPPORT (SOCBASEERR+47) /* Address family not supported by protocol family */
-#define SOCEADDRINUSE (SOCBASEERR+48) /* Address already in use */
-#define SOCEADDRNOTAVAIL (SOCBASEERR+49) /* Can't assign requested address */
-#define SOCENETDOWN (SOCBASEERR+50) /* Network is down */
-#define SOCENETUNREACH (SOCBASEERR+51) /* Network is unreachable */
-#define SOCENETRESET (SOCBASEERR+52) /* Network dropped connection on reset */
-#define SOCECONNABORTED (SOCBASEERR+53) /* Software caused connection abort */
-#define SOCECONNRESET (SOCBASEERR+54) /* Connection reset by peer */
-#define SOCENOBUFS (SOCBASEERR+55) /* No buffer space available */
-#define SOCEISCONN (SOCBASEERR+56) /* Socket is already connected */
-#define SOCENOTCONN (SOCBASEERR+57) /* Socket is not connected */
-#define SOCESHUTDOWN (SOCBASEERR+58) /* Can't send after socket shutdown */
-#define SOCETOOMANYREFS (SOCBASEERR+59) /* Too many references: can't splice */
-#define SOCETIMEDOUT (SOCBASEERR+60) /* Connection timed out */
-#define SOCECONNREFUSED (SOCBASEERR+61) /* Connection refused */
-#define SOCELOOP (SOCBASEERR+62) /* Too many levels of symbolic links */
-#define SOCENAMETOOLONG (SOCBASEERR+63) /* File name too long */
-#define SOCEHOSTDOWN (SOCBASEERR+64) /* Host is down */
-#define SOCEHOSTUNREACH (SOCBASEERR+65) /* No route to host */
-#define SOCENOTEMPTY (SOCBASEERR+66) /* Directory not empty */
-
-/* APR CANONICAL ERROR TESTS */
-#define APR_STATUS_IS_EACCES(s) ((s) == APR_EACCES \
- || (s) == APR_OS_START_SYSERR + ERROR_ACCESS_DENIED \
- || (s) == APR_OS_START_SYSERR + ERROR_SHARING_VIOLATION)
-#define APR_STATUS_IS_EEXIST(s) ((s) == APR_EEXIST \
- || (s) == APR_OS_START_SYSERR + ERROR_OPEN_FAILED \
- || (s) == APR_OS_START_SYSERR + ERROR_FILE_EXISTS \
- || (s) == APR_OS_START_SYSERR + ERROR_ALREADY_EXISTS \
- || (s) == APR_OS_START_SYSERR + ERROR_ACCESS_DENIED)
-#define APR_STATUS_IS_ENAMETOOLONG(s) ((s) == APR_ENAMETOOLONG \
- || (s) == APR_OS_START_SYSERR + ERROR_FILENAME_EXCED_RANGE \
- || (s) == APR_OS_START_SYSERR + SOCENAMETOOLONG)
-#define APR_STATUS_IS_ENOENT(s) ((s) == APR_ENOENT \
- || (s) == APR_OS_START_SYSERR + ERROR_FILE_NOT_FOUND \
- || (s) == APR_OS_START_SYSERR + ERROR_PATH_NOT_FOUND \
- || (s) == APR_OS_START_SYSERR + ERROR_NO_MORE_FILES \
- || (s) == APR_OS_START_SYSERR + ERROR_OPEN_FAILED)
-#define APR_STATUS_IS_ENOTDIR(s) ((s) == APR_ENOTDIR)
-#define APR_STATUS_IS_ENOSPC(s) ((s) == APR_ENOSPC \
- || (s) == APR_OS_START_SYSERR + ERROR_DISK_FULL)
-#define APR_STATUS_IS_ENOMEM(s) ((s) == APR_ENOMEM)
-#define APR_STATUS_IS_EMFILE(s) ((s) == APR_EMFILE \
- || (s) == APR_OS_START_SYSERR + ERROR_TOO_MANY_OPEN_FILES)
-#define APR_STATUS_IS_ENFILE(s) ((s) == APR_ENFILE)
-#define APR_STATUS_IS_EBADF(s) ((s) == APR_EBADF \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_HANDLE)
-#define APR_STATUS_IS_EINVAL(s) ((s) == APR_EINVAL \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_PARAMETER \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_FUNCTION)
-#define APR_STATUS_IS_ESPIPE(s) ((s) == APR_ESPIPE \
- || (s) == APR_OS_START_SYSERR + ERROR_NEGATIVE_SEEK)
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN \
- || (s) == APR_OS_START_SYSERR + ERROR_NO_DATA \
- || (s) == APR_OS_START_SYSERR + SOCEWOULDBLOCK \
- || (s) == APR_OS_START_SYSERR + ERROR_LOCK_VIOLATION)
-#define APR_STATUS_IS_EINTR(s) ((s) == APR_EINTR \
- || (s) == APR_OS_START_SYSERR + SOCEINTR)
-#define APR_STATUS_IS_ENOTSOCK(s) ((s) == APR_ENOTSOCK \
- || (s) == APR_OS_START_SYSERR + SOCENOTSOCK)
-#define APR_STATUS_IS_ECONNREFUSED(s) ((s) == APR_ECONNREFUSED \
- || (s) == APR_OS_START_SYSERR + SOCECONNREFUSED)
-#define APR_STATUS_IS_EINPROGRESS(s) ((s) == APR_EINPROGRESS \
- || (s) == APR_OS_START_SYSERR + SOCEINPROGRESS)
-#define APR_STATUS_IS_ECONNABORTED(s) ((s) == APR_ECONNABORTED \
- || (s) == APR_OS_START_SYSERR + SOCECONNABORTED)
-#define APR_STATUS_IS_ECONNRESET(s) ((s) == APR_ECONNRESET \
- || (s) == APR_OS_START_SYSERR + SOCECONNRESET)
-/* XXX deprecated */
-#define APR_STATUS_IS_ETIMEDOUT(s) ((s) == APR_ETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + SOCETIMEDOUT)
-#undef APR_STATUS_IS_TIMEUP
-#define APR_STATUS_IS_TIMEUP(s) ((s) == APR_TIMEUP \
- || (s) == APR_OS_START_SYSERR + SOCETIMEDOUT)
-#define APR_STATUS_IS_EHOSTUNREACH(s) ((s) == APR_EHOSTUNREACH \
- || (s) == APR_OS_START_SYSERR + SOCEHOSTUNREACH)
-#define APR_STATUS_IS_ENETUNREACH(s) ((s) == APR_ENETUNREACH \
- || (s) == APR_OS_START_SYSERR + SOCENETUNREACH)
-#define APR_STATUS_IS_EFTYPE(s) ((s) == APR_EFTYPE)
-#define APR_STATUS_IS_EPIPE(s) ((s) == APR_EPIPE \
- || (s) == APR_OS_START_SYSERR + ERROR_BROKEN_PIPE \
- || (s) == APR_OS_START_SYSERR + SOCEPIPE)
-#define APR_STATUS_IS_EXDEV(s) ((s) == APR_EXDEV \
- || (s) == APR_OS_START_SYSERR + ERROR_NOT_SAME_DEVICE)
-#define APR_STATUS_IS_ENOTEMPTY(s) ((s) == APR_ENOTEMPTY \
- || (s) == APR_OS_START_SYSERR + ERROR_DIR_NOT_EMPTY \
- || (s) == APR_OS_START_SYSERR + ERROR_ACCESS_DENIED)
-
-/*
- Sorry, too tired to wrap this up for OS2... feel free to
- fit the following into their best matches.
-
- { ERROR_NO_SIGNAL_SENT, ESRCH },
- { SOCEALREADY, EALREADY },
- { SOCEDESTADDRREQ, EDESTADDRREQ },
- { SOCEMSGSIZE, EMSGSIZE },
- { SOCEPROTOTYPE, EPROTOTYPE },
- { SOCENOPROTOOPT, ENOPROTOOPT },
- { SOCEPROTONOSUPPORT, EPROTONOSUPPORT },
- { SOCESOCKTNOSUPPORT, ESOCKTNOSUPPORT },
- { SOCEOPNOTSUPP, EOPNOTSUPP },
- { SOCEPFNOSUPPORT, EPFNOSUPPORT },
- { SOCEAFNOSUPPORT, EAFNOSUPPORT },
- { SOCEADDRINUSE, EADDRINUSE },
- { SOCEADDRNOTAVAIL, EADDRNOTAVAIL },
- { SOCENETDOWN, ENETDOWN },
- { SOCENETRESET, ENETRESET },
- { SOCENOBUFS, ENOBUFS },
- { SOCEISCONN, EISCONN },
- { SOCENOTCONN, ENOTCONN },
- { SOCESHUTDOWN, ESHUTDOWN },
- { SOCETOOMANYREFS, ETOOMANYREFS },
- { SOCELOOP, ELOOP },
- { SOCEHOSTDOWN, EHOSTDOWN },
- { SOCENOTEMPTY, ENOTEMPTY },
- { SOCEPIPE, EPIPE }
-*/
-
-#elif defined(WIN32) && !defined(DOXYGEN) /* !defined(OS2) */
-
-#define APR_FROM_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e + APR_OS_START_SYSERR)
-#define APR_TO_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e - APR_OS_START_SYSERR)
-
-#define apr_get_os_error() (APR_FROM_OS_ERROR(GetLastError()))
-#define apr_set_os_error(e) (SetLastError(APR_TO_OS_ERROR(e)))
-
-/* A special case, only socket calls require this:
- */
-#define apr_get_netos_error() (APR_FROM_OS_ERROR(WSAGetLastError()))
-#define apr_set_netos_error(e) (WSASetLastError(APR_TO_OS_ERROR(e)))
-
-/* APR CANONICAL ERROR TESTS */
-#define APR_STATUS_IS_EACCES(s) ((s) == APR_EACCES \
- || (s) == APR_OS_START_SYSERR + ERROR_ACCESS_DENIED \
- || (s) == APR_OS_START_SYSERR + ERROR_CANNOT_MAKE \
- || (s) == APR_OS_START_SYSERR + ERROR_CURRENT_DIRECTORY \
- || (s) == APR_OS_START_SYSERR + ERROR_DRIVE_LOCKED \
- || (s) == APR_OS_START_SYSERR + ERROR_FAIL_I24 \
- || (s) == APR_OS_START_SYSERR + ERROR_LOCK_VIOLATION \
- || (s) == APR_OS_START_SYSERR + ERROR_LOCK_FAILED \
- || (s) == APR_OS_START_SYSERR + ERROR_NOT_LOCKED \
- || (s) == APR_OS_START_SYSERR + ERROR_NETWORK_ACCESS_DENIED \
- || (s) == APR_OS_START_SYSERR + ERROR_SHARING_VIOLATION)
-#define APR_STATUS_IS_EEXIST(s) ((s) == APR_EEXIST \
- || (s) == APR_OS_START_SYSERR + ERROR_FILE_EXISTS \
- || (s) == APR_OS_START_SYSERR + ERROR_ALREADY_EXISTS)
-#define APR_STATUS_IS_ENAMETOOLONG(s) ((s) == APR_ENAMETOOLONG \
- || (s) == APR_OS_START_SYSERR + ERROR_FILENAME_EXCED_RANGE \
- || (s) == APR_OS_START_SYSERR + WSAENAMETOOLONG)
-#define APR_STATUS_IS_ENOENT(s) ((s) == APR_ENOENT \
- || (s) == APR_OS_START_SYSERR + ERROR_FILE_NOT_FOUND \
- || (s) == APR_OS_START_SYSERR + ERROR_PATH_NOT_FOUND \
- || (s) == APR_OS_START_SYSERR + ERROR_OPEN_FAILED \
- || (s) == APR_OS_START_SYSERR + ERROR_NO_MORE_FILES)
-#define APR_STATUS_IS_ENOTDIR(s) ((s) == APR_ENOTDIR \
- || (s) == APR_OS_START_SYSERR + ERROR_PATH_NOT_FOUND \
- || (s) == APR_OS_START_SYSERR + ERROR_BAD_NETPATH \
- || (s) == APR_OS_START_SYSERR + ERROR_BAD_NET_NAME \
- || (s) == APR_OS_START_SYSERR + ERROR_BAD_PATHNAME \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_DRIVE)
-#define APR_STATUS_IS_ENOSPC(s) ((s) == APR_ENOSPC \
- || (s) == APR_OS_START_SYSERR + ERROR_DISK_FULL)
-#define APR_STATUS_IS_ENOMEM(s) ((s) == APR_ENOMEM \
- || (s) == APR_OS_START_SYSERR + ERROR_ARENA_TRASHED \
- || (s) == APR_OS_START_SYSERR + ERROR_NOT_ENOUGH_MEMORY \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_BLOCK \
- || (s) == APR_OS_START_SYSERR + ERROR_NOT_ENOUGH_QUOTA \
- || (s) == APR_OS_START_SYSERR + ERROR_OUTOFMEMORY)
-#define APR_STATUS_IS_EMFILE(s) ((s) == APR_EMFILE \
- || (s) == APR_OS_START_SYSERR + ERROR_TOO_MANY_OPEN_FILES)
-#define APR_STATUS_IS_ENFILE(s) ((s) == APR_ENFILE)
-#define APR_STATUS_IS_EBADF(s) ((s) == APR_EBADF \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_HANDLE \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_TARGET_HANDLE)
-#define APR_STATUS_IS_EINVAL(s) ((s) == APR_EINVAL \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_ACCESS \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_DATA \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_FUNCTION \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_HANDLE \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_PARAMETER \
- || (s) == APR_OS_START_SYSERR + ERROR_NEGATIVE_SEEK)
-#define APR_STATUS_IS_ESPIPE(s) ((s) == APR_ESPIPE \
- || (s) == APR_OS_START_SYSERR + ERROR_SEEK_ON_DEVICE \
- || (s) == APR_OS_START_SYSERR + ERROR_NEGATIVE_SEEK)
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN \
- || (s) == APR_OS_START_SYSERR + ERROR_NO_DATA \
- || (s) == APR_OS_START_SYSERR + ERROR_NO_PROC_SLOTS \
- || (s) == APR_OS_START_SYSERR + ERROR_NESTING_NOT_ALLOWED \
- || (s) == APR_OS_START_SYSERR + ERROR_MAX_THRDS_REACHED \
- || (s) == APR_OS_START_SYSERR + ERROR_LOCK_VIOLATION \
- || (s) == APR_OS_START_SYSERR + WSAEWOULDBLOCK)
-#define APR_STATUS_IS_EINTR(s) ((s) == APR_EINTR \
- || (s) == APR_OS_START_SYSERR + WSAEINTR)
-#define APR_STATUS_IS_ENOTSOCK(s) ((s) == APR_ENOTSOCK \
- || (s) == APR_OS_START_SYSERR + WSAENOTSOCK)
-#define APR_STATUS_IS_ECONNREFUSED(s) ((s) == APR_ECONNREFUSED \
- || (s) == APR_OS_START_SYSERR + WSAECONNREFUSED)
-#define APR_STATUS_IS_EINPROGRESS(s) ((s) == APR_EINPROGRESS \
- || (s) == APR_OS_START_SYSERR + WSAEINPROGRESS)
-#define APR_STATUS_IS_ECONNABORTED(s) ((s) == APR_ECONNABORTED \
- || (s) == APR_OS_START_SYSERR + WSAECONNABORTED)
-#define APR_STATUS_IS_ECONNRESET(s) ((s) == APR_ECONNRESET \
- || (s) == APR_OS_START_SYSERR + ERROR_NETNAME_DELETED \
- || (s) == APR_OS_START_SYSERR + WSAECONNRESET)
-/* XXX deprecated */
-#define APR_STATUS_IS_ETIMEDOUT(s) ((s) == APR_ETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WSAETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WAIT_TIMEOUT)
-#undef APR_STATUS_IS_TIMEUP
-#define APR_STATUS_IS_TIMEUP(s) ((s) == APR_TIMEUP \
- || (s) == APR_OS_START_SYSERR + WSAETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WAIT_TIMEOUT)
-#define APR_STATUS_IS_EHOSTUNREACH(s) ((s) == APR_EHOSTUNREACH \
- || (s) == APR_OS_START_SYSERR + WSAEHOSTUNREACH)
-#define APR_STATUS_IS_ENETUNREACH(s) ((s) == APR_ENETUNREACH \
- || (s) == APR_OS_START_SYSERR + WSAENETUNREACH)
-#define APR_STATUS_IS_EFTYPE(s) ((s) == APR_EFTYPE \
- || (s) == APR_OS_START_SYSERR + ERROR_EXE_MACHINE_TYPE_MISMATCH \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_DLL \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_MODULETYPE \
- || (s) == APR_OS_START_SYSERR + ERROR_BAD_EXE_FORMAT \
- || (s) == APR_OS_START_SYSERR + ERROR_INVALID_EXE_SIGNATURE \
- || (s) == APR_OS_START_SYSERR + ERROR_FILE_CORRUPT \
- || (s) == APR_OS_START_SYSERR + ERROR_BAD_FORMAT)
-#define APR_STATUS_IS_EPIPE(s) ((s) == APR_EPIPE \
- || (s) == APR_OS_START_SYSERR + ERROR_BROKEN_PIPE)
-#define APR_STATUS_IS_EXDEV(s) ((s) == APR_EXDEV \
- || (s) == APR_OS_START_SYSERR + ERROR_NOT_SAME_DEVICE)
-#define APR_STATUS_IS_ENOTEMPTY(s) ((s) == APR_ENOTEMPTY \
- || (s) == APR_OS_START_SYSERR + ERROR_DIR_NOT_EMPTY)
-
-#elif defined(NETWARE) && defined(USE_WINSOCK) && !defined(DOXYGEN) /* !defined(OS2) && !defined(WIN32) */
-
-#define APR_FROM_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e + APR_OS_START_SYSERR)
-#define APR_TO_OS_ERROR(e) (e == 0 ? APR_SUCCESS : e - APR_OS_START_SYSERR)
-
-#define apr_get_os_error() (errno)
-#define apr_set_os_error(e) (errno = (e))
-
-/* A special case, only socket calls require this: */
-#define apr_get_netos_error() (APR_FROM_OS_ERROR(WSAGetLastError()))
-#define apr_set_netos_error(e) (WSASetLastError(APR_TO_OS_ERROR(e)))
-
-/* APR CANONICAL ERROR TESTS */
-#define APR_STATUS_IS_EACCES(s) ((s) == APR_EACCES)
-#define APR_STATUS_IS_EEXIST(s) ((s) == APR_EEXIST)
-#define APR_STATUS_IS_ENAMETOOLONG(s) ((s) == APR_ENAMETOOLONG)
-#define APR_STATUS_IS_ENOENT(s) ((s) == APR_ENOENT)
-#define APR_STATUS_IS_ENOTDIR(s) ((s) == APR_ENOTDIR)
-#define APR_STATUS_IS_ENOSPC(s) ((s) == APR_ENOSPC)
-#define APR_STATUS_IS_ENOMEM(s) ((s) == APR_ENOMEM)
-#define APR_STATUS_IS_EMFILE(s) ((s) == APR_EMFILE)
-#define APR_STATUS_IS_ENFILE(s) ((s) == APR_ENFILE)
-#define APR_STATUS_IS_EBADF(s) ((s) == APR_EBADF)
-#define APR_STATUS_IS_EINVAL(s) ((s) == APR_EINVAL)
-#define APR_STATUS_IS_ESPIPE(s) ((s) == APR_ESPIPE)
-
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN \
- || (s) == EWOULDBLOCK \
- || (s) == APR_OS_START_SYSERR + WSAEWOULDBLOCK)
-#define APR_STATUS_IS_EINTR(s) ((s) == APR_EINTR \
- || (s) == APR_OS_START_SYSERR + WSAEINTR)
-#define APR_STATUS_IS_ENOTSOCK(s) ((s) == APR_ENOTSOCK \
- || (s) == APR_OS_START_SYSERR + WSAENOTSOCK)
-#define APR_STATUS_IS_ECONNREFUSED(s) ((s) == APR_ECONNREFUSED \
- || (s) == APR_OS_START_SYSERR + WSAECONNREFUSED)
-#define APR_STATUS_IS_EINPROGRESS(s) ((s) == APR_EINPROGRESS \
- || (s) == APR_OS_START_SYSERR + WSAEINPROGRESS)
-#define APR_STATUS_IS_ECONNABORTED(s) ((s) == APR_ECONNABORTED \
- || (s) == APR_OS_START_SYSERR + WSAECONNABORTED)
-#define APR_STATUS_IS_ECONNRESET(s) ((s) == APR_ECONNRESET \
- || (s) == APR_OS_START_SYSERR + WSAECONNRESET)
-/* XXX deprecated */
-#define APR_STATUS_IS_ETIMEDOUT(s) ((s) == APR_ETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WSAETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WAIT_TIMEOUT)
-#undef APR_STATUS_IS_TIMEUP
-#define APR_STATUS_IS_TIMEUP(s) ((s) == APR_TIMEUP \
- || (s) == APR_OS_START_SYSERR + WSAETIMEDOUT \
- || (s) == APR_OS_START_SYSERR + WAIT_TIMEOUT)
-#define APR_STATUS_IS_EHOSTUNREACH(s) ((s) == APR_EHOSTUNREACH \
- || (s) == APR_OS_START_SYSERR + WSAEHOSTUNREACH)
-#define APR_STATUS_IS_ENETUNREACH(s) ((s) == APR_ENETUNREACH \
- || (s) == APR_OS_START_SYSERR + WSAENETUNREACH)
-#define APR_STATUS_IS_ENETDOWN(s) ((s) == APR_OS_START_SYSERR + WSAENETDOWN)
-#define APR_STATUS_IS_EFTYPE(s) ((s) == APR_EFTYPE)
-#define APR_STATUS_IS_EPIPE(s) ((s) == APR_EPIPE)
-#define APR_STATUS_IS_EXDEV(s) ((s) == APR_EXDEV)
-#define APR_STATUS_IS_ENOTEMPTY(s) ((s) == APR_ENOTEMPTY)
-
-#else /* !defined(NETWARE) && !defined(OS2) && !defined(WIN32) */
-
-/*
- * os error codes are clib error codes
- */
-#define APR_FROM_OS_ERROR(e) (e)
-#define APR_TO_OS_ERROR(e) (e)
-
-#define apr_get_os_error() (errno)
-#define apr_set_os_error(e) (errno = (e))
-
-/* A special case, only socket calls require this:
- */
-#define apr_get_netos_error() (errno)
-#define apr_set_netos_error(e) (errno = (e))
-
-/**
- * @addtogroup APR_STATUS_IS
- * @{
- */
-
-/** permission denied */
-#define APR_STATUS_IS_EACCES(s) ((s) == APR_EACCES)
-/** file exists */
-#define APR_STATUS_IS_EEXIST(s) ((s) == APR_EEXIST)
-/** path name is too long */
-#define APR_STATUS_IS_ENAMETOOLONG(s) ((s) == APR_ENAMETOOLONG)
-/**
- * no such file or directory
- * @remark
- * EMVSCATLG can be returned by the automounter on z/OS for
- * paths which do not exist.
- */
-#ifdef EMVSCATLG
-#define APR_STATUS_IS_ENOENT(s) ((s) == APR_ENOENT \
- || (s) == EMVSCATLG)
-#else
-#define APR_STATUS_IS_ENOENT(s) ((s) == APR_ENOENT)
-#endif
-/** not a directory */
-#define APR_STATUS_IS_ENOTDIR(s) ((s) == APR_ENOTDIR)
-/** no space left on device */
-#ifdef EDQUOT
-#define APR_STATUS_IS_ENOSPC(s) ((s) == APR_ENOSPC \
- || (s) == EDQUOT)
-#else
-#define APR_STATUS_IS_ENOSPC(s) ((s) == APR_ENOSPC)
-#endif
-/** not enough memory */
-#define APR_STATUS_IS_ENOMEM(s) ((s) == APR_ENOMEM)
-/** too many open files */
-#define APR_STATUS_IS_EMFILE(s) ((s) == APR_EMFILE)
-/** file table overflow */
-#define APR_STATUS_IS_ENFILE(s) ((s) == APR_ENFILE)
-/** bad file # */
-#define APR_STATUS_IS_EBADF(s) ((s) == APR_EBADF)
-/** invalid argument */
-#define APR_STATUS_IS_EINVAL(s) ((s) == APR_EINVAL)
-/** illegal seek */
-#define APR_STATUS_IS_ESPIPE(s) ((s) == APR_ESPIPE)
-
-/** operation would block */
-#if !defined(EWOULDBLOCK) || !defined(EAGAIN)
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN)
-#elif (EWOULDBLOCK == EAGAIN)
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN)
-#else
-#define APR_STATUS_IS_EAGAIN(s) ((s) == APR_EAGAIN \
- || (s) == EWOULDBLOCK)
-#endif
-
-/** interrupted system call */
-#define APR_STATUS_IS_EINTR(s) ((s) == APR_EINTR)
-/** socket operation on a non-socket */
-#define APR_STATUS_IS_ENOTSOCK(s) ((s) == APR_ENOTSOCK)
-/** Connection Refused */
-#define APR_STATUS_IS_ECONNREFUSED(s) ((s) == APR_ECONNREFUSED)
-/** operation now in progress */
-#define APR_STATUS_IS_EINPROGRESS(s) ((s) == APR_EINPROGRESS)
-
-/**
- * Software caused connection abort
- * @remark
- * EPROTO on certain older kernels really means ECONNABORTED, so we need to
- * ignore it for them. See discussion in new-httpd archives nh.9701 & nh.9603
- *
- * There is potentially a bug in Solaris 2.x x<6, and other boxes that
- * implement tcp sockets in userland (i.e. on top of STREAMS). On these
- * systems, EPROTO can actually result in a fatal loop. See PR#981 for
- * example. It's hard to handle both uses of EPROTO.
- */
-#ifdef EPROTO
-#define APR_STATUS_IS_ECONNABORTED(s) ((s) == APR_ECONNABORTED \
- || (s) == EPROTO)
-#else
-#define APR_STATUS_IS_ECONNABORTED(s) ((s) == APR_ECONNABORTED)
-#endif
-
-/** Connection Reset by peer */
-#define APR_STATUS_IS_ECONNRESET(s) ((s) == APR_ECONNRESET)
-/** Operation timed out
- * @deprecated */
-#define APR_STATUS_IS_ETIMEDOUT(s) ((s) == APR_ETIMEDOUT)
-/** no route to host */
-#define APR_STATUS_IS_EHOSTUNREACH(s) ((s) == APR_EHOSTUNREACH)
-/** network is unreachable */
-#define APR_STATUS_IS_ENETUNREACH(s) ((s) == APR_ENETUNREACH)
-/** inappropriate file type or format */
-#define APR_STATUS_IS_EFTYPE(s) ((s) == APR_EFTYPE)
-/** broken pipe */
-#define APR_STATUS_IS_EPIPE(s) ((s) == APR_EPIPE)
-/** cross device link */
-#define APR_STATUS_IS_EXDEV(s) ((s) == APR_EXDEV)
-/** Directory Not Empty */
-#define APR_STATUS_IS_ENOTEMPTY(s) ((s) == APR_ENOTEMPTY || \
- (s) == APR_EEXIST)
-/** @} */
-
-#endif /* !defined(NETWARE) && !defined(OS2) && !defined(WIN32) */
-
-/** @} */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* ! APR_ERRNO_H */
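
The apr_errno.h excerpt above folds platform error codes into apr_status_t and repeatedly warns that callers should use the APR_STATUS_IS_* tests rather than compare a status against a single constant, because on Win32/OS2 several native codes map to the same POSIX-style condition. A short, hedged usage sketch (assuming a stock APR installation; it is not part of the removed listing):

    /* Open a missing file and test the folded status portably. */
    #include <apr_general.h>
    #include <apr_pools.h>
    #include <apr_errno.h>
    #include <apr_file_io.h>
    #include <stdio.h>

    int main(void)
    {
        apr_pool_t *pool;
        apr_file_t *file;
        apr_status_t rv;
        char msg[256];

        apr_initialize();
        apr_pool_create(&pool, NULL);

        rv = apr_file_open(&file, "/no/such/file",
                           APR_READ, APR_OS_DEFAULT, pool);
        /* APR_STATUS_IS_ENOENT() also matches the folded Win32 codes
         * (ERROR_FILE_NOT_FOUND etc.), which a bare (rv == ENOENT) would miss. */
        if (APR_STATUS_IS_ENOENT(rv))
            printf("missing file: %s\n", apr_strerror(rv, msg, sizeof(msg)));

        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }
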
-/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-#include <sys/errno.h>
-
-
-/*
- * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
- */
-/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
-/*
- * Copyright (c) 1982, 1986, 1989, 1993
- * The Regents of the University of California. All rights reserved.
- * (c) UNIX System Laboratories, Inc.
- * All or some portions of this file are derived from material licensed
- * to the University of California by American Telephone and Telegraph
- * Co. or Unix System Laboratories, Inc. and are reproduced herein with
- * the permission of UNIX System Laboratories, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)errno.h 8.5 (Berkeley) 1/21/94
- */
-
-#ifndef _SYS_ERRNO_H_
-#define _SYS_ERRNO_H_
-
-#include <sys/cdefs.h>
-__BEGIN_DECLS
-extern int * __error(void);
-#define errno (*__error())
-__END_DECLS
-
-/*
- * Error codes
- */
-
-#define EPERM 1 /* Operation not permitted */
-#define ENOENT 2 /* No such file or directory */
-#define ESRCH 3 /* No such process */
-#define EINTR 4 /* Interrupted system call */
-#define EIO 5 /* Input/output error */
-#define ENXIO 6 /* Device not configured */
-#define E2BIG 7 /* Argument list too long */
-#define ENOEXEC 8 /* Exec format error */
-#define EBADF 9 /* Bad file descriptor */
-#define ECHILD 10 /* No child processes */
-#define EDEADLK 11 /* Resource deadlock avoided */
- /* 11 was EAGAIN */
-#define ENOMEM 12 /* Cannot allocate memory */
-#define EACCES 13 /* Permission denied */
-#define EFAULT 14 /* Bad address */
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define ENOTBLK 15 /* Block device required */
-#endif
-#define EBUSY 16 /* Device / Resource busy */
-#define EEXIST 17 /* File exists */
-#define EXDEV 18 /* Cross-device link */
-#define ENODEV 19 /* Operation not supported by device */
-#define ENOTDIR 20 /* Not a directory */
-#define EISDIR 21 /* Is a directory */
-#define EINVAL 22 /* Invalid argument */
-#define ENFILE 23 /* Too many open files in system */
-#define EMFILE 24 /* Too many open files */
-#define ENOTTY 25 /* Inappropriate ioctl for device */
-#define ETXTBSY 26 /* Text file busy */
-#define EFBIG 27 /* File too large */
-#define ENOSPC 28 /* No space left on device */
-#define ESPIPE 29 /* Illegal seek */
-#define EROFS 30 /* Read-only file system */
-#define EMLINK 31 /* Too many links */
-#define EPIPE 32 /* Broken pipe */
-
-/* math software */
-#define EDOM 33 /* Numerical argument out of domain */
-#define ERANGE 34 /* Result too large */
-
-/* non-blocking and interrupt i/o */
-#define EAGAIN 35 /* Resource temporarily unavailable */
-#define EWOULDBLOCK EAGAIN /* Operation would block */
-#define EINPROGRESS 36 /* Operation now in progress */
-#define EALREADY 37 /* Operation already in progress */
-
-/* ipc/network software -- argument errors */
-#define ENOTSOCK 38 /* Socket operation on non-socket */
-#define EDESTADDRREQ 39 /* Destination address required */
-#define EMSGSIZE 40 /* Message too long */
-#define EPROTOTYPE 41 /* Protocol wrong type for socket */
-#define ENOPROTOOPT 42 /* Protocol not available */
-#define EPROTONOSUPPORT 43 /* Protocol not supported */
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
-#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
-#define ENOTSUP 45 /* Operation not supported */
-#if !__DARWIN_UNIX03 && !defined(KERNEL)
-/*
- * This is the same for binary and source compatibility, unless compiling
- * the kernel itself, or compiling __DARWIN_UNIX03; if compiling for the
- * kernel, the correct value will be returned. If compiling non-POSIX
- * source, the kernel return value will be converted by a stub in libc, and
- * if compiling source with __DARWIN_UNIX03, the conversion in libc is not
- * done, and the caller gets the expected (discrete) value.
- */
-#define EOPNOTSUPP ENOTSUP /* Operation not supported on socket */
-#endif /* !__DARWIN_UNIX03 && !KERNEL */
-
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define EPFNOSUPPORT 46 /* Protocol family not supported */
-#endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */
-#define EAFNOSUPPORT 47 /* Address family not supported by protocol family */
-#define EADDRINUSE 48 /* Address already in use */
-#define EADDRNOTAVAIL 49 /* Can't assign requested address */
-
-/* ipc/network software -- operational errors */
-#define ENETDOWN 50 /* Network is down */
-#define ENETUNREACH 51 /* Network is unreachable */
-#define ENETRESET 52 /* Network dropped connection on reset */
-#define ECONNABORTED 53 /* Software caused connection abort */
-#define ECONNRESET 54 /* Connection reset by peer */
-#define ENOBUFS 55 /* No buffer space available */
-#define EISCONN 56 /* Socket is already connected */
-#define ENOTCONN 57 /* Socket is not connected */
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define ESHUTDOWN 58 /* Can't send after socket shutdown */
-#define ETOOMANYREFS 59 /* Too many references: can't splice */
-#endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */
-#define ETIMEDOUT 60 /* Operation timed out */
-#define ECONNREFUSED 61 /* Connection refused */
-
-#define ELOOP 62 /* Too many levels of symbolic links */
-#define ENAMETOOLONG 63 /* File name too long */
-
-/* should be rearranged */
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define EHOSTDOWN 64 /* Host is down */
-#endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */
-#define EHOSTUNREACH 65 /* No route to host */
-#define ENOTEMPTY 66 /* Directory not empty */
-
-/* quotas & mush */
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define EPROCLIM 67 /* Too many processes */
-#define EUSERS 68 /* Too many users */
-#endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */
-#define EDQUOT 69 /* Disc quota exceeded */
-
-/* Network File System */
-#define ESTALE 70 /* Stale NFS file handle */
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define EREMOTE 71 /* Too many levels of remote in path */
-#define EBADRPC 72 /* RPC struct is bad */
-#define ERPCMISMATCH 73 /* RPC version wrong */
-#define EPROGUNAVAIL 74 /* RPC prog. not avail */
-#define EPROGMISMATCH 75 /* Program version wrong */
-#define EPROCUNAVAIL 76 /* Bad procedure for program */
-#endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */
-
-#define ENOLCK 77 /* No locks available */
-#define ENOSYS 78 /* Function not implemented */
-
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define EFTYPE 79 /* Inappropriate file type or format */
-#define EAUTH 80 /* Authentication error */
-#define ENEEDAUTH 81 /* Need authenticator */
-
-/* Intelligent device errors */
-#define EPWROFF 82 /* Device power is off */
-#define EDEVERR 83 /* Device error, e.g. paper out */
-#endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */
-
-#define EOVERFLOW 84 /* Value too large to be stored in data type */
-
-/* Program loading errors */
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define EBADEXEC 85 /* Bad executable */
-#define EBADARCH 86 /* Bad CPU type in executable */
-#define ESHLIBVERS 87 /* Shared library version mismatch */
-#define EBADMACHO 88 /* Malformed Macho file */
-#endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */
-
-#define ECANCELED 89 /* Operation canceled */
-
-#define EIDRM 90 /* Identifier removed */
-#define ENOMSG 91 /* No message of desired type */
-#define EILSEQ 92 /* Illegal byte sequence */
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define ENOATTR 93 /* Attribute not found */
-#endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */
-
-#define EBADMSG 94 /* Bad message */
-#define EMULTIHOP 95 /* Reserved */
-#define ENODATA 96 /* No message available on STREAM */
-#define ENOLINK 97 /* Reserved */
-#define ENOSR 98 /* No STREAM resources */
-#define ENOSTR 99 /* Not a STREAM */
-#define EPROTO 100 /* Protocol error */
-#define ETIME 101 /* STREAM ioctl timeout */
-
-#if __DARWIN_UNIX03 || defined(KERNEL)
-/* This value is only discrete when compiling __DARWIN_UNIX03, or KERNEL */
-#define EOPNOTSUPP 102 /* Operation not supported on socket */
-#endif /* __DARWIN_UNIX03 || KERNEL */
-
-#define ENOPOLICY 103 /* No such policy registered */
-
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define ELAST 103 /* Must be equal largest errno */
-#endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */
-
-#endif /* _SYS_ERRNO_H_ */
diff --git a/doc/legacy/errno.list.solaris.txt b/doc/legacy/errno.list.solaris.txt
deleted file mode 100644
index 23601e9d374..00000000000
--- a/doc/legacy/errno.list.solaris.txt
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2000 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
-/* All Rights Reserved */
-
-/*
- * University Copyright- Copyright (c) 1982, 1986, 1988
- * The Regents of the University of California
- * All Rights Reserved
- *
- * University Acknowledgment- Portions of this document are derived from
- * software developed by the University of California, Berkeley, and its
- * contributors.
- */
-
-#ifndef _SYS_ERRNO_H
-#define _SYS_ERRNO_H
-
-#pragma ident "@(#)errno.h 1.22 05/06/08 SMI"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Error codes
- */
-
-#define EPERM 1 /* Not super-user */
-#define ENOENT 2 /* No such file or directory */
-#define ESRCH 3 /* No such process */
-#define EINTR 4 /* interrupted system call */
-#define EIO 5 /* I/O error */
-#define ENXIO 6 /* No such device or address */
-#define E2BIG 7 /* Arg list too long */
-#define ENOEXEC 8 /* Exec format error */
-#define EBADF 9 /* Bad file number */
-#define ECHILD 10 /* No children */
-#define EAGAIN 11 /* Resource temporarily unavailable */
-#define ENOMEM 12 /* Not enough core */
-#define EACCES 13 /* Permission denied */
-#define EFAULT 14 /* Bad address */
-#define ENOTBLK 15 /* Block device required */
-#define EBUSY 16 /* Mount device busy */
-#define EEXIST 17 /* File exists */
-#define EXDEV 18 /* Cross-device link */
-#define ENODEV 19 /* No such device */
-#define ENOTDIR 20 /* Not a directory */
-#define EISDIR 21 /* Is a directory */
-#define EINVAL 22 /* Invalid argument */
-#define ENFILE 23 /* File table overflow */
-#define EMFILE 24 /* Too many open files */
-#define ENOTTY 25 /* Inappropriate ioctl for device */
-#define ETXTBSY 26 /* Text file busy */
-#define EFBIG 27 /* File too large */
-#define ENOSPC 28 /* No space left on device */
-#define ESPIPE 29 /* Illegal seek */
-#define EROFS 30 /* Read only file system */
-#define EMLINK 31 /* Too many links */
-#define EPIPE 32 /* Broken pipe */
-#define EDOM 33 /* Math arg out of domain of func */
-#define ERANGE 34 /* Math result not representable */
-#define ENOMSG 35 /* No message of desired type */
-#define EIDRM 36 /* Identifier removed */
-#define ECHRNG 37 /* Channel number out of range */
-#define EL2NSYNC 38 /* Level 2 not synchronized */
-#define EL3HLT 39 /* Level 3 halted */
-#define EL3RST 40 /* Level 3 reset */
-#define ELNRNG 41 /* Link number out of range */
-#define EUNATCH 42 /* Protocol driver not attached */
-#define ENOCSI 43 /* No CSI structure available */
-#define EL2HLT 44 /* Level 2 halted */
-#define EDEADLK 45 /* Deadlock condition. */
-#define ENOLCK 46 /* No record locks available. */
-#define ECANCELED 47 /* Operation canceled */
-#define ENOTSUP 48 /* Operation not supported */
-
-/* Filesystem Quotas */
-#define EDQUOT 49 /* Disc quota exceeded */
-
-/* Convergent Error Returns */
-#define EBADE 50 /* invalid exchange */
-#define EBADR 51 /* invalid request descriptor */
-#define EXFULL 52 /* exchange full */
-#define ENOANO 53 /* no anode */
-#define EBADRQC 54 /* invalid request code */
-#define EBADSLT 55 /* invalid slot */
-#define EDEADLOCK 56 /* file locking deadlock error */
-
-#define EBFONT 57 /* bad font file fmt */
-
-/* Interprocess Robust Locks */
-#define EOWNERDEAD 58 /* process died with the lock */
-#define ENOTRECOVERABLE 59 /* lock is not recoverable */
-
-/* stream problems */
-#define ENOSTR 60 /* Device not a stream */
-#define ENODATA 61 /* no data (for no delay io) */
-#define ETIME 62 /* timer expired */
-#define ENOSR 63 /* out of streams resources */
-
-#define ENONET 64 /* Machine is not on the network */
-#define ENOPKG 65 /* Package not installed */
-#define EREMOTE 66 /* The object is remote */
-#define ENOLINK 67 /* the link has been severed */
-#define EADV 68 /* advertise error */
-#define ESRMNT 69 /* srmount error */
-
-#define ECOMM 70 /* Communication error on send */
-#define EPROTO 71 /* Protocol error */
-
-/* Interprocess Robust Locks */
-#define ELOCKUNMAPPED 72 /* locked lock was unmapped */
-
-#define ENOTACTIVE 73 /* Facility is not active */
-#define EMULTIHOP 74 /* multihop attempted */
-#define EBADMSG 77 /* trying to read unreadable message */
-#define ENAMETOOLONG 78 /* path name is too long */
-#define EOVERFLOW 79 /* value too large to be stored in data type */
-#define ENOTUNIQ 80 /* given log. name not unique */
-#define EBADFD 81 /* f.d. invalid for this operation */
-#define EREMCHG 82 /* Remote address changed */
-
-/* shared library problems */
-#define ELIBACC 83 /* Can't access a needed shared lib. */
-#define ELIBBAD 84 /* Accessing a corrupted shared lib. */
-#define ELIBSCN 85 /* .lib section in a.out corrupted. */
-#define ELIBMAX 86 /* Attempting to link in too many libs. */
-#define ELIBEXEC 87 /* Attempting to exec a shared library. */
-#define EILSEQ 88 /* Illegal byte sequence. */
-#define ENOSYS 89 /* Unsupported file system operation */
-#define ELOOP 90 /* Symbolic link loop */
-#define ERESTART 91 /* Restartable system call */
-#define ESTRPIPE 92 /* if pipe/FIFO, don't sleep in stream head */
-#define ENOTEMPTY 93 /* directory not empty */
-#define EUSERS 94 /* Too many users (for UFS) */
-
-/* BSD Networking Software */
- /* argument errors */
-#define ENOTSOCK 95 /* Socket operation on non-socket */
-#define EDESTADDRREQ 96 /* Destination address required */
-#define EMSGSIZE 97 /* Message too long */
-#define EPROTOTYPE 98 /* Protocol wrong type for socket */
-#define ENOPROTOOPT 99 /* Protocol not available */
-#define EPROTONOSUPPORT 120 /* Protocol not supported */
-#define ESOCKTNOSUPPORT 121 /* Socket type not supported */
-#define EOPNOTSUPP 122 /* Operation not supported on socket */
-#define EPFNOSUPPORT 123 /* Protocol family not supported */
-#define EAFNOSUPPORT 124 /* Address family not supported by */
- /* protocol family */
-#define EADDRINUSE 125 /* Address already in use */
-#define EADDRNOTAVAIL 126 /* Can't assign requested address */
- /* operational errors */
-#define ENETDOWN 127 /* Network is down */
-#define ENETUNREACH 128 /* Network is unreachable */
-#define ENETRESET 129 /* Network dropped connection because */
- /* of reset */
-#define ECONNABORTED 130 /* Software caused connection abort */
-#define ECONNRESET 131 /* Connection reset by peer */
-#define ENOBUFS 132 /* No buffer space available */
-#define EISCONN 133 /* Socket is already connected */
-#define ENOTCONN 134 /* Socket is not connected */
-/* XENIX has 135 - 142 */
-#define ESHUTDOWN 143 /* Can't send after socket shutdown */
-#define ETOOMANYREFS 144 /* Too many references: can't splice */
-#define ETIMEDOUT 145 /* Connection timed out */
-#define ECONNREFUSED 146 /* Connection refused */
-#define EHOSTDOWN 147 /* Host is down */
-#define EHOSTUNREACH 148 /* No route to host */
-#define EWOULDBLOCK EAGAIN
-#define EALREADY 149 /* operation already in progress */
-#define EINPROGRESS 150 /* operation now in progress */
-
-/* SUN Network File System */
-#define ESTALE 151 /* Stale NFS file handle */
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SYS_ERRNO_H */
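The two errno reference lists above (Darwin/OS X and Solaris) assign different numeric values to many of the same symbolic names (for example, EPROTO is 100 on Darwin but 71 on Solaris), which is why code that crosses platforms must compare errno against the symbolic constants rather than against hard-coded integers. A minimal sketch, assuming a Linux/glibc build and a deliberately nonexistent path:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* open a path that is assumed not to exist, so the call fails */
        int fd = open("/no/such/path", O_RDONLY);
        if (fd == -1) {
            /* compare against the symbolic constant, never a raw number:
             * the numeric values differ between the tables above */
            if (errno == ENOENT)
                printf("open failed with ENOENT (%d on this platform)\n", ENOENT);
            else
                perror("open");
            return 1;
        }
        close(fd);
        return 0;
    }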
diff --git a/doc/legacy/fdl.texi b/doc/legacy/fdl.texi
deleted file mode 100644
index e33c687cdfb..00000000000
--- a/doc/legacy/fdl.texi
+++ /dev/null
@@ -1,454 +0,0 @@
-
-@c @node GNU Free Documentation License
-@c @appendixsec GNU Free Documentation License
-
-@cindex FDL, GNU Free Documentation License
-@center Version 1.2, November 2002
-
-@display
-Copyright @copyright{} 2000,2001,2002 Free Software Foundation, Inc.
-59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
-
-Everyone is permitted to copy and distribute verbatim copies
-of this license document, but changing it is not allowed.
-@end display
-
-@enumerate 0
-@item
-PREAMBLE
-
-The purpose of this License is to make a manual, textbook, or other
-functional and useful document @dfn{free} in the sense of freedom: to
-assure everyone the effective freedom to copy and redistribute it,
-with or without modifying it, either commercially or noncommercially.
-Secondarily, this License preserves for the author and publisher a way
-to get credit for their work, while not being considered responsible
-for modifications made by others.
-
-This License is a kind of ``copyleft'', which means that derivative
-works of the document must themselves be free in the same sense. It
-complements the GNU General Public License, which is a copyleft
-license designed for free software.
-
-We have designed this License in order to use it for manuals for free
-software, because free software needs free documentation: a free
-program should come with manuals providing the same freedoms that the
-software does. But this License is not limited to software manuals;
-it can be used for any textual work, regardless of subject matter or
-whether it is published as a printed book. We recommend this License
-principally for works whose purpose is instruction or reference.
-
-@item
-APPLICABILITY AND DEFINITIONS
-
-This License applies to any manual or other work, in any medium, that
-contains a notice placed by the copyright holder saying it can be
-distributed under the terms of this License. Such a notice grants a
-world-wide, royalty-free license, unlimited in duration, to use that
-work under the conditions stated herein. The ``Document'', below,
-refers to any such manual or work. Any member of the public is a
-licensee, and is addressed as ``you''. You accept the license if you
-copy, modify or distribute the work in a way requiring permission
-under copyright law.
-
-A ``Modified Version'' of the Document means any work containing the
-Document or a portion of it, either copied verbatim, or with
-modifications and/or translated into another language.
-
-A ``Secondary Section'' is a named appendix or a front-matter section
-of the Document that deals exclusively with the relationship of the
-publishers or authors of the Document to the Document's overall
-subject (or to related matters) and contains nothing that could fall
-directly within that overall subject. (Thus, if the Document is in
-part a textbook of mathematics, a Secondary Section may not explain
-any mathematics.) The relationship could be a matter of historical
-connection with the subject or with related matters, or of legal,
-commercial, philosophical, ethical or political position regarding
-them.
-
-The ``Invariant Sections'' are certain Secondary Sections whose titles
-are designated, as being those of Invariant Sections, in the notice
-that says that the Document is released under this License. If a
-section does not fit the above definition of Secondary then it is not
-allowed to be designated as Invariant. The Document may contain zero
-Invariant Sections. If the Document does not identify any Invariant
-Sections then there are none.
-
-The ``Cover Texts'' are certain short passages of text that are listed,
-as Front-Cover Texts or Back-Cover Texts, in the notice that says that
-the Document is released under this License. A Front-Cover Text may
-be at most 5 words, and a Back-Cover Text may be at most 25 words.
-
-A ``Transparent'' copy of the Document means a machine-readable copy,
-represented in a format whose specification is available to the
-general public, that is suitable for revising the document
-straightforwardly with generic text editors or (for images composed of
-pixels) generic paint programs or (for drawings) some widely available
-drawing editor, and that is suitable for input to text formatters or
-for automatic translation to a variety of formats suitable for input
-to text formatters. A copy made in an otherwise Transparent file
-format whose markup, or absence of markup, has been arranged to thwart
-or discourage subsequent modification by readers is not Transparent.
-An image format is not Transparent if used for any substantial amount
-of text. A copy that is not ``Transparent'' is called ``Opaque''.
-
-Examples of suitable formats for Transparent copies include plain
-@sc{ascii} without markup, Texinfo input format, La@TeX{} input
-format, @acronym{SGML} or @acronym{XML} using a publicly available
-@acronym{DTD}, and standard-conforming simple @acronym{HTML},
-PostScript or @acronym{PDF} designed for human modification. Examples
-of transparent image formats include @acronym{PNG}, @acronym{XCF} and
-@acronym{JPG}. Opaque formats include proprietary formats that can be
-read and edited only by proprietary word processors, @acronym{SGML} or
-@acronym{XML} for which the @acronym{DTD} and/or processing tools are
-not generally available, and the machine-generated @acronym{HTML},
-PostScript or @acronym{PDF} produced by some word processors for
-output purposes only.
-
-The ``Title Page'' means, for a printed book, the title page itself,
-plus such following pages as are needed to hold, legibly, the material
-this License requires to appear in the title page. For works in
-formats which do not have any title page as such, ``Title Page'' means
-the text near the most prominent appearance of the work's title,
-preceding the beginning of the body of the text.
-
-A section ``Entitled XYZ'' means a named subunit of the Document whose
-title either is precisely XYZ or contains XYZ in parentheses following
-text that translates XYZ in another language. (Here XYZ stands for a
-specific section name mentioned below, such as ``Acknowledgements'',
-``Dedications'', ``Endorsements'', or ``History''.) To ``Preserve the Title''
-of such a section when you modify the Document means that it remains a
-section ``Entitled XYZ'' according to this definition.
-
-The Document may include Warranty Disclaimers next to the notice which
-states that this License applies to the Document. These Warranty
-Disclaimers are considered to be included by reference in this
-License, but only as regards disclaiming warranties: any other
-implication that these Warranty Disclaimers may have is void and has
-no effect on the meaning of this License.
-
-@item
-VERBATIM COPYING
-
-You may copy and distribute the Document in any medium, either
-commercially or noncommercially, provided that this License, the
-copyright notices, and the license notice saying this License applies
-to the Document are reproduced in all copies, and that you add no other
-conditions whatsoever to those of this License. You may not use
-technical measures to obstruct or control the reading or further
-copying of the copies you make or distribute. However, you may accept
-compensation in exchange for copies. If you distribute a large enough
-number of copies you must also follow the conditions in section 3.
-
-You may also lend copies, under the same conditions stated above, and
-you may publicly display copies.
-
-@item
-COPYING IN QUANTITY
-
-If you publish printed copies (or copies in media that commonly have
-printed covers) of the Document, numbering more than 100, and the
-Document's license notice requires Cover Texts, you must enclose the
-copies in covers that carry, clearly and legibly, all these Cover
-Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
-the back cover. Both covers must also clearly and legibly identify
-you as the publisher of these copies. The front cover must present
-the full title with all words of the title equally prominent and
-visible. You may add other material on the covers in addition.
-Copying with changes limited to the covers, as long as they preserve
-the title of the Document and satisfy these conditions, can be treated
-as verbatim copying in other respects.
-
-If the required texts for either cover are too voluminous to fit
-legibly, you should put the first ones listed (as many as fit
-reasonably) on the actual cover, and continue the rest onto adjacent
-pages.
-
-If you publish or distribute Opaque copies of the Document numbering
-more than 100, you must either include a machine-readable Transparent
-copy along with each Opaque copy, or state in or with each Opaque copy
-a computer-network location from which the general network-using
-public has access to download using public-standard network protocols
-a complete Transparent copy of the Document, free of added material.
-If you use the latter option, you must take reasonably prudent steps,
-when you begin distribution of Opaque copies in quantity, to ensure
-that this Transparent copy will remain thus accessible at the stated
-location until at least one year after the last time you distribute an
-Opaque copy (directly or through your agents or retailers) of that
-edition to the public.
-
-It is requested, but not required, that you contact the authors of the
-Document well before redistributing any large number of copies, to give
-them a chance to provide you with an updated version of the Document.
-
-@item
-MODIFICATIONS
-
-You may copy and distribute a Modified Version of the Document under
-the conditions of sections 2 and 3 above, provided that you release
-the Modified Version under precisely this License, with the Modified
-Version filling the role of the Document, thus licensing distribution
-and modification of the Modified Version to whoever possesses a copy
-of it. In addition, you must do these things in the Modified Version:
-
-@enumerate A
-@item
-Use in the Title Page (and on the covers, if any) a title distinct
-from that of the Document, and from those of previous versions
-(which should, if there were any, be listed in the History section
-of the Document). You may use the same title as a previous version
-if the original publisher of that version gives permission.
-
-@item
-List on the Title Page, as authors, one or more persons or entities
-responsible for authorship of the modifications in the Modified
-Version, together with at least five of the principal authors of the
-Document (all of its principal authors, if it has fewer than five),
-unless they release you from this requirement.
-
-@item
-State on the Title page the name of the publisher of the
-Modified Version, as the publisher.
-
-@item
-Preserve all the copyright notices of the Document.
-
-@item
-Add an appropriate copyright notice for your modifications
-adjacent to the other copyright notices.
-
-@item
-Include, immediately after the copyright notices, a license notice
-giving the public permission to use the Modified Version under the
-terms of this License, in the form shown in the Addendum below.
-
-@item
-Preserve in that license notice the full lists of Invariant Sections
-and required Cover Texts given in the Document's license notice.
-
-@item
-Include an unaltered copy of this License.
-
-@item
-Preserve the section Entitled ``History'', Preserve its Title, and add
-to it an item stating at least the title, year, new authors, and
-publisher of the Modified Version as given on the Title Page. If
-there is no section Entitled ``History'' in the Document, create one
-stating the title, year, authors, and publisher of the Document as
-given on its Title Page, then add an item describing the Modified
-Version as stated in the previous sentence.
-
-@item
-Preserve the network location, if any, given in the Document for
-public access to a Transparent copy of the Document, and likewise
-the network locations given in the Document for previous versions
-it was based on. These may be placed in the ``History'' section.
-You may omit a network location for a work that was published at
-least four years before the Document itself, or if the original
-publisher of the version it refers to gives permission.
-
-@item
-For any section Entitled ``Acknowledgements'' or ``Dedications'', Preserve
-the Title of the section, and preserve in the section all the
-substance and tone of each of the contributor acknowledgements and/or
-dedications given therein.
-
-@item
-Preserve all the Invariant Sections of the Document,
-unaltered in their text and in their titles. Section numbers
-or the equivalent are not considered part of the section titles.
-
-@item
-Delete any section Entitled ``Endorsements''. Such a section
-may not be included in the Modified Version.
-
-@item
-Do not retitle any existing section to be Entitled ``Endorsements'' or
-to conflict in title with any Invariant Section.
-
-@item
-Preserve any Warranty Disclaimers.
-@end enumerate
-
-If the Modified Version includes new front-matter sections or
-appendices that qualify as Secondary Sections and contain no material
-copied from the Document, you may at your option designate some or all
-of these sections as invariant. To do this, add their titles to the
-list of Invariant Sections in the Modified Version's license notice.
-These titles must be distinct from any other section titles.
-
-You may add a section Entitled ``Endorsements'', provided it contains
-nothing but endorsements of your Modified Version by various
-parties---for example, statements of peer review or that the text has
-been approved by an organization as the authoritative definition of a
-standard.
-
-You may add a passage of up to five words as a Front-Cover Text, and a
-passage of up to 25 words as a Back-Cover Text, to the end of the list
-of Cover Texts in the Modified Version. Only one passage of
-Front-Cover Text and one of Back-Cover Text may be added by (or
-through arrangements made by) any one entity. If the Document already
-includes a cover text for the same cover, previously added by you or
-by arrangement made by the same entity you are acting on behalf of,
-you may not add another; but you may replace the old one, on explicit
-permission from the previous publisher that added the old one.
-
-The author(s) and publisher(s) of the Document do not by this License
-give permission to use their names for publicity for or to assert or
-imply endorsement of any Modified Version.
-
-@item
-COMBINING DOCUMENTS
-
-You may combine the Document with other documents released under this
-License, under the terms defined in section 4 above for modified
-versions, provided that you include in the combination all of the
-Invariant Sections of all of the original documents, unmodified, and
-list them all as Invariant Sections of your combined work in its
-license notice, and that you preserve all their Warranty Disclaimers.
-
-The combined work need only contain one copy of this License, and
-multiple identical Invariant Sections may be replaced with a single
-copy. If there are multiple Invariant Sections with the same name but
-different contents, make the title of each such section unique by
-adding at the end of it, in parentheses, the name of the original
-author or publisher of that section if known, or else a unique number.
-Make the same adjustment to the section titles in the list of
-Invariant Sections in the license notice of the combined work.
-
-In the combination, you must combine any sections Entitled ``History''
-in the various original documents, forming one section Entitled
-``History''; likewise combine any sections Entitled ``Acknowledgements'',
-and any sections Entitled ``Dedications''. You must delete all
-sections Entitled ``Endorsements.''
-
-@item
-COLLECTIONS OF DOCUMENTS
-
-You may make a collection consisting of the Document and other documents
-released under this License, and replace the individual copies of this
-License in the various documents with a single copy that is included in
-the collection, provided that you follow the rules of this License for
-verbatim copying of each of the documents in all other respects.
-
-You may extract a single document from such a collection, and distribute
-it individually under this License, provided you insert a copy of this
-License into the extracted document, and follow this License in all
-other respects regarding verbatim copying of that document.
-
-@item
-AGGREGATION WITH INDEPENDENT WORKS
-
-A compilation of the Document or its derivatives with other separate
-and independent documents or works, in or on a volume of a storage or
-distribution medium, is called an ``aggregate'' if the copyright
-resulting from the compilation is not used to limit the legal rights
-of the compilation's users beyond what the individual works permit.
-When the Document is included in an aggregate, this License does not
-apply to the other works in the aggregate which are not themselves
-derivative works of the Document.
-
-If the Cover Text requirement of section 3 is applicable to these
-copies of the Document, then if the Document is less than one half of
-the entire aggregate, the Document's Cover Texts may be placed on
-covers that bracket the Document within the aggregate, or the
-electronic equivalent of covers if the Document is in electronic form.
-Otherwise they must appear on printed covers that bracket the whole
-aggregate.
-
-@item
-TRANSLATION
-
-Translation is considered a kind of modification, so you may
-distribute translations of the Document under the terms of section 4.
-Replacing Invariant Sections with translations requires special
-permission from their copyright holders, but you may include
-translations of some or all Invariant Sections in addition to the
-original versions of these Invariant Sections. You may include a
-translation of this License, and all the license notices in the
-Document, and any Warranty Disclaimers, provided that you also include
-the original English version of this License and the original versions
-of those notices and disclaimers. In case of a disagreement between
-the translation and the original version of this License or a notice
-or disclaimer, the original version will prevail.
-
-If a section in the Document is Entitled ``Acknowledgements'',
-``Dedications'', or ``History'', the requirement (section 4) to Preserve
-its Title (section 1) will typically require changing the actual
-title.
-
-@item
-TERMINATION
-
-You may not copy, modify, sublicense, or distribute the Document except
-as expressly provided for under this License. Any other attempt to
-copy, modify, sublicense or distribute the Document is void, and will
-automatically terminate your rights under this License. However,
-parties who have received copies, or rights, from you under this
-License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
-@item
-FUTURE REVISIONS OF THIS LICENSE
-
-The Free Software Foundation may publish new, revised versions
-of the GNU Free Documentation License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns. See
-@uref{http://www.gnu.org/copyleft/}.
-
-Each version of the License is given a distinguishing version number.
-If the Document specifies that a particular numbered version of this
-License ``or any later version'' applies to it, you have the option of
-following the terms and conditions either of that specified version or
-of any later version that has been published (not as a draft) by the
-Free Software Foundation. If the Document does not specify a version
-number of this License, you may choose any version ever published (not
-as a draft) by the Free Software Foundation.
-@end enumerate
-
-@page
-@c @appendixsubsec ADDENDUM: How to use this License for your
-@c documents
-@subsection ADDENDUM: How to use this License for your documents
-
-To use this License in a document you have written, include a copy of
-the License in the document and put the following copyright and
-license notices just after the title page:
-
-@smallexample
-@group
- Copyright (C) @var{year} @var{your name}.
- Permission is granted to copy, distribute and/or modify this document
- under the terms of the GNU Free Documentation License, Version 1.2
- or any later version published by the Free Software Foundation;
- with no Invariant Sections, no Front-Cover Texts, and no Back-Cover
- Texts. A copy of the license is included in the section entitled ``GNU
- Free Documentation License''.
-@end group
-@end smallexample
-
-If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts,
-replace the ``with...Texts.'' line with this:
-
-@smallexample
-@group
- with the Invariant Sections being @var{list their titles}, with
- the Front-Cover Texts being @var{list}, and with the Back-Cover Texts
- being @var{list}.
-@end group
-@end smallexample
-
-If you have Invariant Sections without Cover Texts, or some other
-combination of the three, merge those two alternatives to suit the
-situation.
-
-If your document contains nontrivial examples of program code, we
-recommend releasing these examples in parallel under your choice of
-free software license, such as the GNU General Public License,
-to permit their use in free software.
-
-@c Local Variables:
-@c ispell-local-pdict: "ispell-dict"
-@c End:
-
diff --git a/doc/legacy/fuse.odg b/doc/legacy/fuse.odg
deleted file mode 100644
index 61bd103c78b..00000000000
--- a/doc/legacy/fuse.odg
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/fuse.pdf b/doc/legacy/fuse.pdf
deleted file mode 100644
index a7d13faff56..00000000000
--- a/doc/legacy/fuse.pdf
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/get_put_api_using_xattr.txt b/doc/legacy/get_put_api_using_xattr.txt
deleted file mode 100644
index 243f9f1aec2..00000000000
--- a/doc/legacy/get_put_api_using_xattr.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-GlusterFS get/put API interface provided through extended attributes:
-
-API usage:
- int put(dirpath/filename, data): setfattr -n glusterfs.file.<filename> -v <data> <dirpath>
- void *get(dirpath/filename): getfattr -n glusterfs.file.<filename> <dirpath>
-
-
-internals:
-* unify handling setxattr/getxattr
- - setxattr
- unify's setxattr forwards the setxattr call to all the child nodes with the XATTR_REPLACE flag, except the namespace. setxattr succeeds only on the child node on which the file already exists; if the setxattr operation fails on all child nodes, it indicates that the file does not yet exist on any of them. unify then follows the same rules as it does for create, but using the setxattr call itself with the XATTR_CREATE flag: unify sends a setxattr to the namespace first, with zero-length data, and if that succeeds, unify schedules the setxattr to one of the child nodes.
-
- - getxattr
- unify's getxattr forwards the getxattr call to all the child nodes, waits for the operation to complete on all of them, and returns success if getxattr succeeded on at least one child node.
-
-* posix handling setxattr/getxattr
- - setxattr
- posix setxattr does an open with O_CREAT|O_TRUNC on <path>/<name>, writes the value of the setxattr as data into the file, and closes the file. when the data is null, posix setxattr skips the write. the file is closed after the write.
-
- - getxattr
- posix getxattr does an open with O_RDONLY on <path>/<name> and reads the complete content of the file. the file is closed after the read.
-
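The get/put interface described in the file above is built on ordinary extended-attribute operations, so it can be driven programmatically as well as with setfattr/getfattr. A minimal sketch in C, assuming a Linux client, a hypothetical GlusterFS mount point /mnt/gluster, and the glusterfs.file.<filename> key format quoted above:

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/xattr.h>

    int main(void)
    {
        const char *dirpath = "/mnt/gluster/data";        /* hypothetical directory on the mount */
        const char *key     = "glusterfs.file.hello.txt"; /* glusterfs.file.<filename> */
        const char *data    = "hello world";
        char buf[4096];

        /* put(dirpath/filename, data): set the xattr on the directory */
        if (setxattr(dirpath, key, data, strlen(data), 0) == -1) {
            perror("setxattr");
            return 1;
        }

        /* get(dirpath/filename): read the data back through the same key */
        ssize_t len = getxattr(dirpath, key, buf, sizeof(buf) - 1);
        if (len == -1) {
            perror("getxattr");
            return 1;
        }
        buf[len] = '\0';
        printf("read back: %s\n", buf);
        return 0;
    }

The setfattr -n/-v and getfattr -n commands shown in the file are the shell equivalents of these two calls.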
diff --git a/doc/legacy/ha.odg b/doc/legacy/ha.odg
deleted file mode 100644
index e4b8b72d08b..00000000000
--- a/doc/legacy/ha.odg
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/ha.pdf b/doc/legacy/ha.pdf
deleted file mode 100644
index e372c0ab03e..00000000000
--- a/doc/legacy/ha.pdf
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/hacker-guide/Makefile.am b/doc/legacy/hacker-guide/Makefile.am
deleted file mode 100644
index 65c92ac235e..00000000000
--- a/doc/legacy/hacker-guide/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-EXTRA_DIST = replicate.txt bdb.txt posix.txt call-stub.txt write-behind.txt
-
-#EXTRA_DIST = hacker-guide.tex afr.txt bdb.txt posix.txt call-stub.txt write-behind.txt
-#hacker_guidedir = $(docdir)
-#hacker_guide_DATA = hacker-guide.pdf
-
-#hacker-guide.pdf: $(EXTRA_DIST)
-# pdflatex $(srcdir)/hacker-guide.tex
diff --git a/doc/legacy/hacker-guide/call-stub.txt b/doc/legacy/hacker-guide/call-stub.txt
deleted file mode 100644
index cab8e4d552e..00000000000
--- a/doc/legacy/hacker-guide/call-stub.txt
+++ /dev/null
@@ -1,1033 +0,0 @@
-creating a call stub and pausing a call
----------------------------------------
-libglusterfs provides a separate API to pause each of the fops. the parameters to each API are:
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
- NOTE: @fn should exactly take the same type and number of parameters that
- the corresponding regular fop takes.
-the rest are the regular parameters to the corresponding fop.
-
-NOTE: @frame can never be NULL. fop_<operation>_stub() fails with errno
- set to EINVAL, if @frame is NULL. also wherever @loc is applicable,
- @loc cannot be NULL.
-
-refer to the individual stub creation APIs to know about call-stub creation's behaviour with
-specific parameters.
-
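Putting the notes above together, the usual pattern is: a translator creates a stub for the fop it wants to defer, parks the stub somewhere, and later hands it to call_resume() to continue the paused operation. A minimal sketch for the stat fop, using fop_stat_stub() from the list below; the resume callback, the queueing helper, and the exact call_resume() signature (assumed here to take the stub pointer) are illustrative assumptions, not part of this file:

    /* requires the libglusterfs headers (e.g. call-stub.h); sketch only */

    /* assumed to take the same type and number of parameters as the regular
     * stat fop, per the NOTE above */
    int32_t my_stat_resume (call_frame_t *frame, xlator_t *this, loc_t *loc);

    void enqueue_for_later (call_stub_t *stub);   /* hypothetical helper */

    int32_t
    my_xlator_stat (call_frame_t *frame, xlator_t *this, loc_t *loc)
    {
            call_stub_t *stub = NULL;

            /* @frame and @loc must be non-NULL, as noted above */
            stub = fop_stat_stub (frame, my_stat_resume, loc);
            if (!stub)
                    return -1;   /* stub creation failed, errno set to EINVAL */

            /* park the stub and return without winding the call further */
            enqueue_for_later (stub);
            return 0;
    }

    /* later, a worker picks the stub up and resumes the paused fop */
    void
    worker (call_stub_t *stub)
    {
            call_resume (stub);  /* assumed to take the stub pointer */
    }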
-here is the list of stub creation APIs for xlator fops.
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@need_xattr - flag to specify if xattr should be returned or not.
-call_stub_t *
-fop_lookup_stub (call_frame_t *frame,
- fop_lookup_t fn,
- loc_t *loc,
- int32_t need_xattr);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-call_stub_t *
-fop_stat_stub (call_frame_t *frame,
- fop_stat_t fn,
- loc_t *loc);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to lk fop.
- NOTE: @fd is stored with a fd_ref().
-call_stub_t *
-fop_fstat_stub (call_frame_t *frame,
- fop_fstat_t fn,
- fd_t *fd);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to @loc->inode and
- @loc->parent, if not NULL. also @loc->path will be copied to a different location.
-@mode - mode parameter to chmod.
-call_stub_t *
-fop_chmod_stub (call_frame_t *frame,
- fop_chmod_t fn,
- loc_t *loc,
- mode_t mode);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to lk fop.
- NOTE: @fd is stored with a fd_ref().
-@mode - mode parameter for fchmod fop.
-call_stub_t *
-fop_fchmod_stub (call_frame_t *frame,
- fop_fchmod_t fn,
- fd_t *fd,
- mode_t mode);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to @loc->inode and
- @loc->parent, if not NULL. also @loc->path will be copied to a different location.
-@uid - uid parameter to chown.
-@gid - gid parameter to chown.
-call_stub_t *
-fop_chown_stub (call_frame_t *frame,
- fop_chown_t fn,
- loc_t *loc,
- uid_t uid,
- gid_t gid);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to lk fop.
- NOTE: @fd is stored with a fd_ref().
-@uid - uid parameter to fchown.
-@gid - gid parameter to fchown.
-call_stub_t *
-fop_fchown_stub (call_frame_t *frame,
- fop_fchown_t fn,
- fd_t *fd,
- uid_t uid,
- gid_t gid);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location, if not NULL.
-@off - offset parameter to truncate fop.
-call_stub_t *
-fop_truncate_stub (call_frame_t *frame,
- fop_truncate_t fn,
- loc_t *loc,
- off_t off);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to lk fop.
- NOTE: @fd is stored with a fd_ref().
-@off - offset parameter to ftruncate fop.
-call_stub_t *
-fop_ftruncate_stub (call_frame_t *frame,
- fop_ftruncate_t fn,
- fd_t *fd,
- off_t off);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@tv - tv parameter to utimens fop.
-call_stub_t *
-fop_utimens_stub (call_frame_t *frame,
- fop_utimens_t fn,
- loc_t *loc,
- struct timespec tv[2]);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@mask - mask parameter for access fop.
-call_stub_t *
-fop_access_stub (call_frame_t *frame,
- fop_access_t fn,
- loc_t *loc,
- int32_t mask);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@size - size parameter to readlink fop.
-call_stub_t *
-fop_readlink_stub (call_frame_t *frame,
- fop_readlink_t fn,
- loc_t *loc,
- size_t size);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@mode - mode parameter to mknod fop.
-@rdev - rdev parameter to mknod fop.
-call_stub_t *
-fop_mknod_stub (call_frame_t *frame,
- fop_mknod_t fn,
- loc_t *loc,
- mode_t mode,
- dev_t rdev);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@mode - mode parameter to mkdir fop.
-call_stub_t *
-fop_mkdir_stub (call_frame_t *frame,
- fop_mkdir_t fn,
- loc_t *loc,
- mode_t mode);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-call_stub_t *
-fop_unlink_stub (call_frame_t *frame,
- fop_unlink_t fn,
- loc_t *loc);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-call_stub_t *
-fop_rmdir_stub (call_frame_t *frame,
- fop_rmdir_t fn,
- loc_t *loc);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@linkname - linkname parameter to symlink fop.
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-call_stub_t *
-fop_symlink_stub (call_frame_t *frame,
- fop_symlink_t fn,
- const char *linkname,
- loc_t *loc);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@oldloc - pointer to location structure.
- NOTE: @oldloc will be copied to a different location, with inode_ref() to
- @oldloc->inode and @oldloc->parent, if not NULL. also @oldloc->path will
- be copied to a different location, if not NULL.
-@newloc - pointer to location structure.
- NOTE: @newloc will be copied to a different location, with inode_ref() to
- @newloc->inode and @newloc->parent, if not NULL. also @newloc->path will
- be copied to a different location, if not NULL.
-call_stub_t *
-fop_rename_stub (call_frame_t *frame,
- fop_rename_t fn,
- loc_t *oldloc,
- loc_t *newloc);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@newpath - newpath parameter to link fop.
-call_stub_t *
-fop_link_stub (call_frame_t *frame,
- fop_link_t fn,
- loc_t *oldloc,
- const char *newpath);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@flags - flags parameter to create fop.
-@mode - mode parameter to create fop.
-@fd - file descriptor parameter to create fop.
- NOTE: @fd is stored with a fd_ref().
-call_stub_t *
-fop_create_stub (call_frame_t *frame,
- fop_create_t fn,
- loc_t *loc,
- int32_t flags,
- mode_t mode, fd_t *fd);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@flags - flags parameter to open fop.
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-call_stub_t *
-fop_open_stub (call_frame_t *frame,
- fop_open_t fn,
- loc_t *loc,
- int32_t flags,
- fd_t *fd);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to lk fop.
- NOTE: @fd is stored with a fd_ref().
-@size - size parameter to readv fop.
-@off - offset parameter to readv fop.
-call_stub_t *
-fop_readv_stub (call_frame_t *frame,
- fop_readv_t fn,
- fd_t *fd,
- size_t size,
- off_t off);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to lk fop.
- NOTE: @fd is stored with a fd_ref().
-@vector - vector parameter to writev fop.
- NOTE: @vector is iov_dup()ed while creating stub. and frame->root->req_refs
- dictionary is dict_ref()ed.
-@count - count parameter to writev fop.
-@off - off parameter to writev fop.
-call_stub_t *
-fop_writev_stub (call_frame_t *frame,
- fop_writev_t fn,
- fd_t *fd,
- struct iovec *vector,
- int32_t count,
- off_t off);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to flush fop.
- NOTE: @fd is stored with a fd_ref().
-call_stub_t *
-fop_flush_stub (call_frame_t *frame,
- fop_flush_t fn,
- fd_t *fd);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to lk fop.
- NOTE: @fd is stored with a fd_ref().
-@datasync - datasync parameter to fsync fop.
-call_stub_t *
-fop_fsync_stub (call_frame_t *frame,
- fop_fsync_t fn,
- fd_t *fd,
- int32_t datasync);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to @loc->inode and
- @loc->parent, if not NULL. also @loc->path will be copied to a different location.
-@fd - file descriptor parameter to opendir fop.
- NOTE: @fd is stored with a fd_ref().
-call_stub_t *
-fop_opendir_stub (call_frame_t *frame,
- fop_opendir_t fn,
- loc_t *loc,
- fd_t *fd);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to getdents fop.
- NOTE: @fd is stored with a fd_ref().
-@size - size parameter to getdents fop.
-@off - off parameter to getdents fop.
-@flags - flags parameter to getdents fop.
-call_stub_t *
-fop_getdents_stub (call_frame_t *frame,
- fop_getdents_t fn,
- fd_t *fd,
- size_t size,
- off_t off,
- int32_t flag);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to setdents fop.
- NOTE: @fd is stored with a fd_ref().
-@flags - flags parameter to setdents fop.
-@entries - entries parameter to setdents fop.
-call_stub_t *
-fop_setdents_stub (call_frame_t *frame,
- fop_setdents_t fn,
- fd_t *fd,
- int32_t flags,
- dir_entry_t *entries,
- int32_t count);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to setdents fop.
- NOTE: @fd is stored with a fd_ref().
-@datasync - datasync parameter to fsyncdir fop.
-call_stub_t *
-fop_fsyncdir_stub (call_frame_t *frame,
- fop_fsyncdir_t fn,
- fd_t *fd,
- int32_t datasync);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-call_stub_t *
-fop_statfs_stub (call_frame_t *frame,
- fop_statfs_t fn,
- loc_t *loc);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@dict - dict parameter to setxattr fop.
- NOTE: stub creation procedure stores @dict pointer with dict_ref() to it.
-call_stub_t *
-fop_setxattr_stub (call_frame_t *frame,
- fop_setxattr_t fn,
- loc_t *loc,
- dict_t *dict,
- int32_t flags);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@name - name parameter to getxattr fop.
-call_stub_t *
-fop_getxattr_stub (call_frame_t *frame,
- fop_getxattr_t fn,
- loc_t *loc,
- const char *name);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@name - name parameter to removexattr fop.
- NOTE: name string will be copied to a different location while creating stub.
-call_stub_t *
-fop_removexattr_stub (call_frame_t *frame,
- fop_removexattr_t fn,
- loc_t *loc,
- const char *name);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to lk fop.
- NOTE: @fd is stored with a fd_ref().
-@cmd - command parameter to lk fop.
-@lock - lock parameter to lk fop.
- NOTE: lock will be copied to a different location while creating stub.
-call_stub_t *
-fop_lk_stub (call_frame_t *frame,
- fop_lk_t fn,
- fd_t *fd,
- int32_t cmd,
- struct flock *lock);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - fd parameter to gf_lk fop.
- NOTE: @fd is fd_ref()ed while creating stub, if not NULL.
-@cmd - cmd parameter to gf_lk fop.
-@lock - lock parameter to gf_lk fop.
- NOTE: @lock is copied to a different memory location while creating
- stub.
-call_stub_t *
-fop_gf_lk_stub (call_frame_t *frame,
- fop_gf_lk_t fn,
- fd_t *fd,
- int32_t cmd,
- struct flock *lock);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@fd - file descriptor parameter to readdir fop.
- NOTE: @fd is stored with a fd_ref().
-@size - size parameter to readdir fop.
-@off - offset parameter to readdir fop.
-call_stub_t *
-fop_readdir_stub (call_frame_t *frame,
- fop_readdir_t fn,
- fd_t *fd,
- size_t size,
- off_t off);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@loc - pointer to location structure.
- NOTE: @loc will be copied to a different location, with inode_ref() to
- @loc->inode and @loc->parent, if not NULL. also @loc->path will be
- copied to a different location.
-@flags - flags parameter to checksum fop.
-call_stub_t *
-fop_checksum_stub (call_frame_t *frame,
- fop_checksum_t fn,
- loc_t *loc,
- int32_t flags);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@inode - inode parameter to @fn.
- NOTE: @inode pointer is stored with an inode_ref().
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-@dict - dict parameter to @fn.
- NOTE: @dict pointer is stored with dict_ref().
-call_stub_t *
-fop_lookup_cbk_stub (call_frame_t *frame,
- fop_lookup_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct stat *buf,
- dict_t *dict);
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_stat_cbk_stub (call_frame_t *frame,
- fop_stat_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct stat *buf);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_fstat_cbk_stub (call_frame_t *frame,
- fop_fstat_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct stat *buf);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_chmod_cbk_stub (call_frame_t *frame,
- fop_chmod_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct stat *buf);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_fchmod_cbk_stub (call_frame_t *frame,
- fop_fchmod_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct stat *buf);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_chown_cbk_stub (call_frame_t *frame,
- fop_chown_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct stat *buf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_fchown_cbk_stub (call_frame_t *frame,
- fop_fchown_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct stat *buf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_truncate_cbk_stub (call_frame_t *frame,
- fop_truncate_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct stat *buf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_ftruncate_cbk_stub (call_frame_t *frame,
- fop_ftruncate_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct stat *buf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_utimens_cbk_stub (call_frame_t *frame,
- fop_utimens_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct stat *buf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-call_stub_t *
-fop_access_cbk_stub (call_frame_t *frame,
- fop_access_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@path - path parameter to @fn.
- NOTE: @path is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_readlink_cbk_stub (call_frame_t *frame,
- fop_readlink_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- const char *path);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@inode - inode parameter to @fn.
- NOTE: @inode pointer is stored with an inode_ref().
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_mknod_cbk_stub (call_frame_t *frame,
- fop_mknod_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct stat *buf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@inode - inode parameter to @fn.
- NOTE: @inode pointer is stored with an inode_ref().
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_mkdir_cbk_stub (call_frame_t *frame,
- fop_mkdir_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct stat *buf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-call_stub_t *
-fop_unlink_cbk_stub (call_frame_t *frame,
- fop_unlink_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-call_stub_t *
-fop_rmdir_cbk_stub (call_frame_t *frame,
- fop_rmdir_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@inode - inode parameter to @fn.
- NOTE: @inode pointer is stored with an inode_ref().
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_symlink_cbk_stub (call_frame_t *frame,
- fop_symlink_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct stat *buf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_rename_cbk_stub (call_frame_t *frame,
- fop_rename_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct stat *buf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@inode - inode parameter to @fn.
- NOTE: @inode pointer is stored with an inode_ref().
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_link_cbk_stub (call_frame_t *frame,
- fop_link_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct stat *buf);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@fd - fd parameter to @fn.
- NOTE: @fd pointer is stored with a fd_ref().
-@inode - inode parameter to @fn.
- NOTE: @inode pointer is stored with an inode_ref().
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_create_cbk_stub (call_frame_t *frame,
- fop_create_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- fd_t *fd,
- inode_t *inode,
- struct stat *buf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@fd - fd parameter to @fn.
- NOTE: @fd pointer is stored with a fd_ref().
-call_stub_t *
-fop_open_cbk_stub (call_frame_t *frame,
- fop_open_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- fd_t *fd);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@vector - vector parameter to @fn.
- NOTE: @vector is copied to a different memory location, if not NULL. also
- frame->root->rsp_refs is dict_ref()ed.
-@stbuf - stbuf parameter to @fn.
- NOTE: @stbuf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_readv_cbk_stub (call_frame_t *frame,
- fop_readv_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct iovec *vector,
- int32_t count,
- struct stat *stbuf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@stbuf - stbuf parameter to @fn.
- NOTE: @stbuf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_writev_cbk_stub (call_frame_t *frame,
- fop_writev_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct stat *stbuf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-call_stub_t *
-fop_flush_cbk_stub (call_frame_t *frame,
- fop_flush_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-call_stub_t *
-fop_fsync_cbk_stub (call_frame_t *frame,
- fop_fsync_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@fd - fd parameter to @fn.
- NOTE: @fd pointer is stored with a fd_ref().
-call_stub_t *
-fop_opendir_cbk_stub (call_frame_t *frame,
- fop_opendir_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- fd_t *fd);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@entries - entries parameter to @fn.
-@count - count parameter to @fn.
-call_stub_t *
-fop_getdents_cbk_stub (call_frame_t *frame,
- fop_getdents_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- dir_entry_t *entries,
- int32_t count);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-call_stub_t *
-fop_setdents_cbk_stub (call_frame_t *frame,
- fop_setdents_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-call_stub_t *
-fop_fsyncdir_cbk_stub (call_frame_t *frame,
- fop_fsyncdir_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@buf - buf parameter to @fn.
- NOTE: @buf is copied to a different memory location, if not NULL.
-call_stub_t *
-fop_statfs_cbk_stub (call_frame_t *frame,
- fop_statfs_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct statvfs *buf);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-call_stub_t *
-fop_setxattr_cbk_stub (call_frame_t *frame,
- fop_setxattr_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@value - value dictionary parameter to @fn.
- NOTE: @value pointer is stored with a dict_ref().
-call_stub_t *
-fop_getxattr_cbk_stub (call_frame_t *frame,
- fop_getxattr_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- dict_t *value);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-call_stub_t *
-fop_removexattr_cbk_stub (call_frame_t *frame,
- fop_removexattr_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@lock - lock parameter to @fn.
- NOTE: @lock is copied to a different memory location while creating
- stub.
-call_stub_t *
-fop_lk_cbk_stub (call_frame_t *frame,
- fop_lk_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct flock *lock);
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@lock - lock parameter to @fn.
- NOTE: @lock is copied to a different memory location while creating
- stub.
-call_stub_t *
-fop_gf_lk_cbk_stub (call_frame_t *frame,
- fop_gf_lk_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- struct flock *lock);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@entries - entries parameter to @fn.
-call_stub_t *
-fop_readdir_cbk_stub (call_frame_t *frame,
- fop_readdir_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- gf_dirent_t *entries);
-
-
-@frame - call frame which has to be used to resume the call at call_resume().
-@fn - procedure to call during call_resume().
-@op_ret - op_ret parameter to @fn.
-@op_errno - op_errno parameter to @fn.
-@file_checksum - file_checksum parameter to @fn.
- NOTE: file_checksum will be copied to a different memory location
- while creating stub.
-@dir_checksum - dir_checksum parameter to @fn.
- NOTE: dir_checksum will be copied to a different memory location
- while creating stub.
-call_stub_t *
-fop_checksum_cbk_stub (call_frame_t *frame,
- fop_checksum_cbk_t fn,
- int32_t op_ret,
- int32_t op_errno,
- uint8_t *file_checksum,
- uint8_t *dir_checksum);
-
-resuming a call:
----------------
- A call can be resumed using its call stub through the call_resume() API.
-
- void call_resume (call_stub_t *stub);
-
- stub - call stub created during pausing a call.
-
- NOTE: call_resume() will decrease reference count of any fd_t, dict_t and inode_t that it finds
- in stub->args.<operation>.<fd_t-or-inode_t-or-dict_t>. so, if any fd_t, dict_t or
- inode_t pointers are assigned at stub->args.<operation>.<fd_t-or-inode_t-or-dict_t> after
- fop_<operation>_stub() call, they must be <fd_t-or-inode_t-or-dict_t>_ref()ed.
-
- call_resume does not STACK_DESTROY() for any fop.
-
- if stub->fn is NULL, call_resume does STACK_WIND() or STACK_UNWIND() using the stub->frame.
-
- return - call_resume() fails only if @stub is NULL, in which case it fails with errno set to EINVAL.
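-
-  For illustration, a translator that wants to pause a readdir and perform it
-  later might do something like the sketch below (my_readdir, my_readdir_cbk,
-  my_readdir_resume and the queueing are hypothetical names used only here,
-  and the usual readdir fop prototype is assumed; fop_readdir_stub() and
-  call_resume() are the calls documented above):
-
-      int32_t
-      my_readdir_resume (call_frame_t *frame, xlator_t *this,
-                         fd_t *fd, size_t size, off_t off)
-      {
-              /* actually perform the readdir now */
-              STACK_WIND (frame, my_readdir_cbk,
-                          FIRST_CHILD (this), FIRST_CHILD (this)->fops->readdir,
-                          fd, size, off);
-              return 0;
-      }
-
-      int32_t
-      my_readdir (call_frame_t *frame, xlator_t *this,
-                  fd_t *fd, size_t size, off_t off)
-      {
-              /* pause: create a stub instead of winding the call */
-              call_stub_t *stub = fop_readdir_stub (frame, my_readdir_resume,
-                                                    fd, size, off);
-              /* ... queue @stub somewhere ... */
-              return 0;
-      }
-
-  When the queued stub is later handed to call_resume (), my_readdir_resume ()
-  is invoked with the saved frame and arguments.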
diff --git a/doc/legacy/hacker-guide/hacker-guide.tex b/doc/legacy/hacker-guide/hacker-guide.tex
deleted file mode 100644
index 11101e7a87a..00000000000
--- a/doc/legacy/hacker-guide/hacker-guide.tex
+++ /dev/null
@@ -1,309 +0,0 @@
-\documentclass{book}[12pt]
-\usepackage{graphicx}
-% \usepackage{fancyhdr}
-
-% \pagestyle{fancy}
-\begin{document}
-
-% \headheight 117pt
-% \rhead{\includegraphics{zr-logo.eps}}
-
-\author{Gluster}
-\title{GlusterFS 1.3 Hacker's Guide}
-\date{June 1, 2007}
-
-\maketitle
-\frontmatter
-\tableofcontents
-
-\mainmatter
-\chapter{Introduction}
-
-\section{Coding guidelines}
-GlusterFS uses Git for version control. To get the latest source do:
-\begin{verbatim}
- $ git clone git://git.gluster.com/glusterfs.git glusterfs
-\end{verbatim}
-\noindent
-GlusterFS follows the GNU coding
-standards\footnote{http://www.gnu.org/prep/standards\_toc.html} for the
-most part.
-
-\chapter{Major components}
-\section{libglusterfs}
-\texttt{libglusterfs} contains supporting code used by all the other components.
-The important files here are:
-
-\texttt{dict.c}: This is an implementation of a serializable dictionary type. It is
-used by the protocol code to send requests and replies. It is also used to pass options
-to translators.
-
-\texttt{logging.c}: This is a thread-safe logging library. The log messages go to a
-file (default \texttt{/usr/local/var/log/glusterfs/*}).
-
-\texttt{protocol.c}: This file implements the GlusterFS on-the-wire
-protocol. The protocol itself is a simple ASCII protocol, designed to
-be easy to parse and be human readable.
-
-A sample GlusterFS protocol block looks like this:
-\begin{verbatim}
- Block Start header
- 0000000000000023 callid
- 00000001 type
- 00000016 op
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx human-readable name
- 00000000000000000000000000000ac3 block size
- <...> block
- Block End
-\end{verbatim}
-
-\texttt{stack.h}: This file defines the \texttt{STACK\_WIND} and
-\texttt{STACK\_UNWIND} macros which are used to implement the parallel
-stack that is maintained for inter-xlator calls. See the \textsl{Taking control
-of the stack} section below for more details.
-
-\texttt{spec.y}: This contains the Yacc grammar for the GlusterFS
-specification file, and the parsing code.
-
-
-% TODO: draw diagrams of the trees.
-% Two rules:
-% (1) the directory structure is the same on all nodes
-% (2) a file can exist on only one node
-
-\section{glusterfs-fuse}
-\section{glusterfsd}
-\section{transport}
-\section{scheduler}
-\section{xlator}
-
-\chapter{xlators}
-\section{Taking control of the stack}
-One can think of STACK\_WIND/UNWIND as a very specific RPC mechanism.
-
-% \includegraphics{stack.eps}
-
-\section{Overview of xlators}
-
-\flushleft{\LARGE\texttt{cluster/}}
-\vskip 2ex
-\flushleft{\Large\texttt{afr}}
-\vskip 2ex
-\flushleft{\Large\texttt{stripe}}
-\vskip 2ex
-\flushleft{\Large\texttt{unify}}
-
-\vskip 4ex
-\flushleft{\LARGE\texttt{debug/}}
-\vskip 2ex
-\flushleft{\Large\texttt{trace}}
-\vskip 2ex
-The trace xlator simply logs all fops and mops, and passes them through to its child.
-
-\vskip 4ex
-\flushleft{\LARGE\texttt{features/}}
-\flushleft{\Large\texttt{posix-locks}}
-\vskip 2ex
-This xlator implements \textsc{posix} record locking semantics over
-any kind of storage.
-
-\vskip 4ex
-\flushleft{\LARGE\texttt{performance/}}
-
-\flushleft{\Large\texttt{io-threads}}
-\vskip 2ex
-\flushleft{\Large\texttt{read-ahead}}
-\vskip 2ex
-\flushleft{\Large\texttt{stat-prefetch}}
-\vskip 2ex
-\flushleft{\Large\texttt{write-behind}}
-\vskip 2ex
-
-\vskip 4ex
-\flushleft{\LARGE\texttt{protocol/}}
-\vskip 2ex
-
-\flushleft{\Large\texttt{client}}
-\vskip 2ex
-
-\flushleft{\Large\texttt{server}}
-\vskip 2ex
-
-\vskip 4ex
-\flushleft{\LARGE\texttt{storage/}}
-\flushleft{\Large\texttt{posix}}
-\vskip 2ex
-The \texttt{posix} xlator is the one which actually makes calls to the
-on-disk filesystem. Currently this is the only storage xlator available. However,
-plans to develop other storage xlators, such as one for Amazon's S3 service, are
-on the roadmap.
-
-\chapter{Writing a simple xlator}
-\noindent
-In this section we're going to write a rot13 xlator. ``Rot13'' is a
-simple substitution cipher which obscures a text by replacing each
-letter with the letter thirteen places down the alphabet. So `a' (0)
-would become `n' (12), `b' would be 'm', and so on. Rot13 applied to
-a piece of ciphertext yields the plaintext again, because rot13 is its
-own inverse, since:
-
-\[
-x_c \equiv x + 13 \pmod{26}
-\]
-\[
-x_c + 13 \equiv x + 13 + 13 \equiv x \pmod{26}
-\]
-
-First we include the requisite headers.
-
-\begin{verbatim}
-#include <ctype.h>
-#include <sys/uio.h>
-
-#include "glusterfs.h"
-#include "xlator.h"
-#include "logging.h"
-
-/*
- * This is a rot13 ``encryption'' xlator. It rot13's data when
- * writing to disk and rot13's it back when reading it.
- * This xlator is meant as an example, not for production
- * use ;) (hence no error-checking)
- */
-
-\end{verbatim}
-
-Then we write the rot13 function itself. For simplicity, we only transform lower case
-letters. Any other byte is passed through as it is.
-
-\begin{verbatim}
-/* We only handle lower case letters for simplicity */
-static void
-rot13 (char *buf, int len)
-{
- int i;
- for (i = 0; i < len; i++) {
-   if (buf[i] >= 'a' && buf[i] <= 'z')
-     buf[i] = 'a' + ((buf[i] - 'a' + 13) % 26);
- }
-}
-\end{verbatim}
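-
-For example, rot13'ing the five-byte buffer \texttt{"uryyb"} in place with this
-function yields \texttt{"hello"}, and rot13'ing the result again restores
-\texttt{"uryyb"}.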
-
-Next comes a utility function whose purpose will be clear after looking at the code
-below.
-
-\begin{verbatim}
-static void
-rot13_iovec (struct iovec *vector, int count)
-{
- int i;
- for (i = 0; i < count; i++) {
- rot13 (vector[i].iov_base, vector[i].iov_len);
- }
-}
-\end{verbatim}
-
-\begin{verbatim}
-static int32_t
-rot13_readv_cbk (call_frame_t *frame,
- call_frame_t *prev_frame,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iovec *vector,
- int32_t count)
-{
- rot13_iovec (vector, count);
-
- STACK_UNWIND (frame, op_ret, op_errno, vector, count);
- return 0;
-}
-
-static int32_t
-rot13_readv (call_frame_t *frame,
- xlator_t *this,
- dict_t *ctx,
- size_t size,
- off_t offset)
-{
- STACK_WIND (frame,
- rot13_readv_cbk,
- FIRST_CHILD (this),
- FIRST_CHILD (this)->fops->readv,
- ctx, size, offset);
- return 0;
-}
-
-static int32_t
-rot13_writev_cbk (call_frame_t *frame,
- call_frame_t *prev_frame,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno)
-{
- STACK_UNWIND (frame, op_ret, op_errno);
- return 0;
-}
-
-static int32_t
-rot13_writev (call_frame_t *frame,
- xlator_t *this,
- dict_t *ctx,
- struct iovec *vector,
- int32_t count,
- off_t offset)
-{
- rot13_iovec (vector, count);
-
- STACK_WIND (frame,
- rot13_writev_cbk,
- FIRST_CHILD (this),
- FIRST_CHILD (this)->fops->writev,
- ctx, vector, count, offset);
- return 0;
-}
-
-\end{verbatim}
-
-Every xlator must define two functions and two external symbols. The functions are
-\texttt{init} and \texttt{fini}, and the symbols are \texttt{fops} and \texttt{mops}.
-The \texttt{init} function is called when the xlator is loaded by GlusterFS, and
-contains code for the xlator to initialize itself. Note that if an xlator is present
-multiple times in the spec tree, the \texttt{init} function will be called each time
-the xlator is loaded.
-
-\begin{verbatim}
-int32_t
-init (xlator_t *this)
-{
- if (!this->children) {
- gf_log ("rot13", GF_LOG_ERROR,
- "FATAL: rot13 should have exactly one child");
- return -1;
- }
-
- gf_log ("rot13", GF_LOG_DEBUG, "rot13 xlator loaded");
- return 0;
-}
-\end{verbatim}
-
-\begin{verbatim}
-
-void
-fini (xlator_t *this)
-{
- return;
-}
-
-struct xlator_fops fops = {
- .readv = rot13_readv,
- .writev = rot13_writev
-};
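-
-/* the text above says every xlator also exports ``mops''; an empty
-   management-ops table is enough for this example */
-struct xlator_mops mops = {
-};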
-
-
-\end{verbatim}
-
-\end{document}
-
diff --git a/doc/legacy/hacker-guide/replicate.txt b/doc/legacy/hacker-guide/replicate.txt
deleted file mode 100644
index ad5b352a829..00000000000
--- a/doc/legacy/hacker-guide/replicate.txt
+++ /dev/null
@@ -1,206 +0,0 @@
----------------
-* cluster/replicate
----------------
-
-Before understanding replicate, one must understand two internal FOPs:
-
-GF_FILE_LK:
- This is exactly like fcntl(2) locking, except the locks are in a
- separate domain from locks held by applications.
-
-GF_DIR_LK (loc_t *loc, char *basename):
- This allows one to lock a name under a directory. For example,
- to lock /mnt/glusterfs/foo, one would use the call:
-
- GF_DIR_LK ({loc_t for "/mnt/glusterfs"}, "foo")
-
- If one wishes to lock *all* the names under a particular directory,
- supply the basename argument as NULL.
-
- The locks can either be read locks or write locks; consult the
- function prototype for more details.
-
-Both these operations are implemented by the features/locks (earlier
-known as posix-locks) translator.
-
---------------
-* Basic design
---------------
-
-All FOPs can be classified into four major groups:
-
- - inode-read
- Operations that read an inode's data (file contents) or metadata (perms, etc.).
-
- access, getxattr, fstat, readlink, readv, stat.
-
- - inode-write
- Operations that modify an inode's data or metadata.
-
- chmod, chown, truncate, writev, utimens.
-
- - dir-read
- Operations that read a directory's contents or metadata.
-
- readdir, getdents, checksum.
-
- - dir-write
- Operations that modify a directory's contents or metadata.
-
- create, link, mkdir, mknod, rename, rmdir, symlink, unlink.
-
- Some of these make a subgroup in that they modify *two* different entries:
- link, rename, symlink.
-
- - Others
- Other operations.
-
- flush, lookup, open, opendir, statfs.
-
-------------
-* Algorithms
-------------
-
-Each of the four major groups has its own algorithm:
-
- ----------------------
- - inode-read, dir-read
- ----------------------
-
- = Send a request to the first child that is up:
- - if it fails:
- try the next available child
- - if we have exhausted all children:
- return failure
-
- -------------
- - inode-write
- -------------
-
- All operations are done in parallel unless specified otherwise.
-
- (1) Send a GF_FILE_LK request on all children for a write lock on
- the appropriate region
- (for metadata operations: entire file (0, 0)
- for writev: (offset, offset+size of buffer))
-
- - If a lock request fails on a child:
- unlock all children
- try to acquire a blocking lock (F_SETLKW) on each child, serially.
-
- If this fails (due to ENOTCONN or EINVAL):
- Consider this child as dead for the rest of the transaction.
-
- (2) Mark all children as "pending" on all (alive) children
- (see below for meaning of "pending").
-
- - If it fails on any child:
- mark it as dead (in transaction local state).
-
- (3) Perform operation on all (alive) children.
-
- - If it fails on any child:
- mark it as dead (in transaction local state).
-
- (4) Unmark all successful children as not "pending" on all nodes.
-
- (5) Unlock region on all (alive) children.
-
- -----------
- - dir-write
- -----------
-
- The algorithm for dir-write is same as above except instead of holding
- GF_FILE_LK locks we hold a GF_DIR_LK lock on the name being operated upon.
- In case of link-type calls, we hold locks on both the operand names.
-
------------
-* "pending"
------------
-
- The "pending" number is like a journal entry. A pending entry is an
- array of 32-bit integers stored in network byte-order as the extended
- attribute of an inode (which can be a directory as well).
-
- There are three keys corresponding to three types of pending operations:
-
- - AFR_METADATA_PENDING
- There are some metadata operations pending on this inode (perms, ctime/mtime,
- xattr, etc.).
-
- - AFR_DATA_PENDING
- There is some data pending on this inode (writev).
-
- - AFR_ENTRY_PENDING
- There are some directory operations pending on this directory
- (create, unlink, etc.).
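-
-  A minimal sketch of decoding such a pending array, assuming it has already
-  been fetched with getxattr() under whatever key the translator uses (the
-  helper name is illustrative only):
-
-      #include <stdio.h>
-      #include <stdint.h>
-      #include <arpa/inet.h>
-
-      static void
-      print_pending (const uint32_t *pending, int count)
-      {
-              int i;
-              /* the values are stored in network byte order */
-              for (i = 0; i < count; i++)
-                      printf ("pending[%d] = %u\n", i, ntohl (pending[i]));
-      }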
-
------------
-* Self heal
------------
-
- - On lookup, gather extended attribute data:
- - If entry is a regular file:
- - If an entry is present on one child and not on others:
- - create entry on others.
- - If entries exist but have different metadata (perms, etc.):
- - consider the entry with the highest AFR_METADATA_PENDING number as
- definitive and replicate its attributes on children.
-
- - If entry is a directory:
- - Consider the entry with the highest AFR_ENTRY_PENDING number as
- definitive and replicate its contents on all children.
-
- - If any two entries have non-matching types (i.e., one is file and
- other is directory):
- - Announce to the user via log that a split-brain situation has been
- detected, and do nothing.
-
- - On open, gather extended attribute data:
- - Consider the file with the highest AFR_DATA_PENDING number as
- the definitive one and replicate its contents on all other
- children.
-
- During all self heal operations, appropriate locks must be held on all
- regions/entries being affected.
-
----------------
-* Inode scaling
----------------
-
-Inode scaling is necessary because of the following situation:
- - If an inode number is returned for a directory (by lookup) which was
- previously the inode number of a file (as per FUSE's table), then
- FUSE gets horribly confused (consult a FUSE expert for more details).
-
-To avoid such a situation, we distribute the 64-bit inode space equally
-among all children of replicate.
-
-To illustrate:
-
-If c1, c2, c3 are children of replicate, they each get 1/3 of the available
-inode space:
-
-Child: c1 c2 c3 c1 c2 c3 c1 c2 c3 c1 c2 ...
-Inode number: 1 2 3 4 5 6 7 8 9 10 11 ...
-
-Thus, if lookup on c1 returns an inode number "2", it is scaled to "4"
-(which is the second inode number in c1's space).
-
-This way we ensure that there is never a collision of inode numbers from
-two different children.
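-
-A minimal sketch of this scaling arithmetic (purely illustrative; the function
-name is made up and the real logic lives in the replicate sources), with the
-children indexed 0..N-1:
-
-    uint64_t
-    afr_scale_ino (uint64_t child_ino, int child_index, int child_count)
-    {
-            return (child_ino - 1) * child_count + child_index + 1;
-    }
-
-    /* c1 (index 0) of 3 children returning inode 2:  (2-1)*3 + 0 + 1 = 4 */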
-
-This reduction of inode space doesn't really reduce the usability of
-replicate since even if we assume replicate has 1024 children (which would be a
-highly unusual scenario), each child still has a 54-bit inode space.
-
-2^54 ~ 1.8 * 10^16
-
-which is much larger than any real world requirement.
-
-
-==============================================
-$ Last updated: Sun Oct 12 23:17:01 IST 2008 $
-$ Author: Vikas Gorur <vikas@gluster.com> $
-==============================================
-
diff --git a/doc/legacy/handling-options.txt b/doc/legacy/handling-options.txt
deleted file mode 100644
index 9a3b2510acb..00000000000
--- a/doc/legacy/handling-options.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-
-How to add a new option to a given volume?
-===========================================
-
-* Add an entry in 'struct volume_options options[]' with your key, the type
- of the 'key', etc. (a sketch of such an entry is shown below).
-
-* The 'key' and the corresponding 'value' given by the user are validated
- before calling init() of the translator/transport/scheduler/auth-module.
-
-* Once init() completes successfully, the user will get a warning for any
- 'key' given that is not defined in these modules.
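-
-* A rough sketch of such an options[] entry (the option name here is made up,
-  and the field/enum names should be checked against xlator.h before use):
-
-    struct volume_options options[] = {
-            { .key  = {"cache-size"},
-              .type = GF_OPTION_TYPE_SIZET },
-            { .key  = {NULL} }
-    };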
-
diff --git a/doc/legacy/mac-related-xattrs.txt b/doc/legacy/mac-related-xattrs.txt
deleted file mode 100644
index 92bb2ceef2d..00000000000
--- a/doc/legacy/mac-related-xattrs.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-
-This document is intended to briefly explain how the Extended Attributes on
-Darwin 10.5.x releases works
-----
-
-On Darwin, apart from all the normal filesystem operations, 'Finder' (like
-Explorer in Windows, but a little more) keeps its information in two extended
-attributes named 'com.apple.FinderInfo' and 'com.apple.ResourceFork'. If these
-xattrs are not implemented, the filesystem won't be shown in Finder, and if they
-are not implemented properly there may be issues when some of the file operations
-are done through the Finder GUI. But when a filesystem is used at its mountpoint
-from a terminal, everything is fine and these xattrs are not required.
-
-Currently the way these xattrs are implemented is simple. All the xattr calls
-(getxattr, setxattr, listxattr, removexattr) are passed down to the underlying
-filesystem. In most cases, when the exported FS is on Mac OS X itself, these
-keys are supported, hence the fops succeed. But when exports from a different
-OS are used on Darwin, the issue is that an extended attribute prefix like
-'com.apple.' may not be supported, hence the problem with Finder. To solve this
-issue, GlusterFS returns virtual default values for these keys, which works
-fine in most cases.
-
diff --git a/doc/legacy/porting_guide.txt b/doc/legacy/porting_guide.txt
deleted file mode 100644
index 5705cd96461..00000000000
--- a/doc/legacy/porting_guide.txt
+++ /dev/null
@@ -1,45 +0,0 @@
- GlusterFS Porting Guide
- -----------------------
-
-* General setup
-
-The configure script will detect the target platform for the build.
-All platform-specific CFLAGS and macro definitions should be done
-in configure.ac.
-
-Platform-specific code can be written like this:
-
-#ifdef GF_DARWIN_HOST_OS
- /* some code specific to Darwin */
-#endif
-
-* Coding guidelines
-
-In general, avoid glibc extensions. For example, nested functions don't work
-on Mac OS X. It is best to stick to C99.
-
-When using library calls and system calls, pay attention to the
-portability notes. As far as possible stick to POSIX-specified behavior.
-Do not use anything not expressly permitted by the specification. For example,
-some fields in structures may be present only on certain platforms. Avoid
-use of such things.
-
-Do not pass values of constants such as F_*, O_*, errno values, etc. across
-platforms.
-
-Please refer to compat-errno.h for more details about cross-platform errno
-handling inside glusterfs.
-
-* Specific issues
-
-- The argp library is available only on Linux through glibc, but for other
- platforms glusterfs already includes the argp-standalone library, which will
- be statically linked during the glusterfs build.
-
-- Extended attribute calls (setxattr, listxattr, etc.) have differing prototypes
- on different platforms. See compat.h for macro definitions that resolve this, and
- also read the extended attribute documentation specific to your platform.
-
-------------------------------------------
-Last revised: Thu Feb 28 13:58:07 IST 2008
-------------------------------------------
diff --git a/doc/legacy/replicate.lyx b/doc/legacy/replicate.lyx
deleted file mode 100644
index e3d081191e0..00000000000
--- a/doc/legacy/replicate.lyx
+++ /dev/null
@@ -1,797 +0,0 @@
-#LyX 1.4.2 created this file. For more info see http://www.lyx.org/
-\lyxformat 245
-\begin_document
-\begin_header
-\textclass article
-\language english
-\inputencoding auto
-\fontscheme default
-\graphics default
-\paperfontsize default
-\spacing single
-\papersize default
-\use_geometry false
-\use_amsmath 1
-\cite_engine basic
-\use_bibtopic false
-\paperorientation portrait
-\secnumdepth 3
-\tocdepth 3
-\paragraph_separation skip
-\defskip medskip
-\quotes_language english
-\papercolumns 1
-\papersides 1
-\paperpagestyle default
-\tracking_changes false
-\output_changes false
-\end_header
-
-\begin_body
-
-\begin_layout Title
-
-\size larger
-Automatic File Replication (replicate) in GlusterFS
-\end_layout
-
-\begin_layout Author
-Vikas Gorur
-\family typewriter
-\size larger
-<vikas@gluster.com>
-\end_layout
-
-\begin_layout Standard
-\begin_inset ERT
-status open
-
-\begin_layout Standard
-
-
-\backslash
-hrule
-\end_layout
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Section*
-Overview
-\end_layout
-
-\begin_layout Standard
-This document describes the design and usage of the replicate translator in GlusterFS.
- This document is valid for the 1.4.x releases, and not earlier ones.
-\end_layout
-
-\begin_layout Standard
-The replicate translator of GlusterFS aims to keep identical copies of a file
- on all its subvolumes, as far as possible.
- It tries to do this by performing all filesystem mutation operations (writing
- data, creating files, changing ownership, etc.) on all its subvolumes in
- such a way that if an operation succeeds on at least one subvolume, all
- other subvolumes can later be brought up to date.
-\end_layout
-
-\begin_layout Standard
-In the rest of the document the terms
-\begin_inset Quotes eld
-\end_inset
-
-subvolume
-\begin_inset Quotes erd
-\end_inset
-
- and
-\begin_inset Quotes eld
-\end_inset
-
-server
-\begin_inset Quotes erd
-\end_inset
-
- are used interchangeably, trusting that it will cause no confusion to the
- reader.
-\end_layout
-
-\begin_layout Section*
-Usage
-\end_layout
-
-\begin_layout Standard
-A sample volume declaration for replicate looks like this:
-\end_layout
-
-\begin_layout Standard
-\begin_inset ERT
-status open
-
-\begin_layout Standard
-
-
-\backslash
-begin{verbatim}
-\end_layout
-
-\begin_layout Standard
-
-volume replicate
-\end_layout
-
-\begin_layout Standard
-
- type cluster/replicate
-\end_layout
-
-\begin_layout Standard
-
- # options, see below for description
-\end_layout
-
-\begin_layout Standard
-
- subvolumes brick1 brick2
-\end_layout
-
-\begin_layout Standard
-
-end-volume
-\end_layout
-
-\begin_layout Standard
-
-
-\backslash
-end{verbatim}
-\end_layout
-
-\begin_layout Standard
-
-\end_layout
-
-\begin_layout Standard
-
-\end_layout
-
-\begin_layout Standard
-
-\end_layout
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Standard
-This defines a replicate volume with two subvolumes, brick1 and brick2.
- For replicate to work properly, it is essential that its subvolumes support
-\series bold
-extended attributes
-\series default
-.
- This means that you should choose a backend filesystem that supports extended
- attributes, like XFS, ReiserFS, or Ext3.
-\end_layout
-
-\begin_layout Standard
-The storage volumes used as backend for replicate
-\emph on
-must
-\emph default
- have a posix-locks volume loaded above them.
-\end_layout
-
-\begin_layout Standard
-\begin_inset ERT
-status open
-
-\begin_layout Standard
-
-
-\backslash
-begin{verbatim}
-\end_layout
-
-\begin_layout Standard
-
-volume brick1
-\end_layout
-
-\begin_layout Standard
-
- type features/posix-locks
-\end_layout
-
-\begin_layout Standard
-
- subvolumes brick1-ds
-\end_layout
-
-\begin_layout Standard
-
-end-volume
-\end_layout
-
-\begin_layout Standard
-
-
-\backslash
-end{verbatim}
-\end_layout
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Section*
-Design
-\end_layout
-
-\begin_layout Subsection*
-Read algorithm
-\end_layout
-
-\begin_layout Standard
-All operations that do not modify the file or directory are sent to all
- the subvolumes and the first successful reply is returned to the application.
-\end_layout
-
-\begin_layout Standard
-The read() system call (reading data from a file) is an exception.
- For read() calls, replicate tries to do load balancing by sending all reads from
- a particular file to a particular server.
-\end_layout
-
-\begin_layout Standard
-The read algorithm is also affected by the option read-subvolume; see below
- for details.
-\end_layout
-
-\begin_layout Subsection*
-Classes of file operations
-\end_layout
-
-\begin_layout Standard
-replicate divides all filesystem write operations into three classes:
-\end_layout
-
-\begin_layout Itemize
-
-\series bold
-data:
-\series default
-Operations that modify the contents of a file (write, truncate).
-\end_layout
-
-\begin_layout Itemize
-
-\series bold
-metadata:
-\series default
-Operations that modify attributes of a file or directory (permissions, ownership
-, etc.).
-\end_layout
-
-\begin_layout Itemize
-
-\series bold
-entry:
-\series default
-Operations that create or delete directory entries (mkdir, create, rename,
- rmdir, unlink, etc.).
-\end_layout
-
-\begin_layout Subsection*
-Locking and Change Log
-\end_layout
-
-\begin_layout Standard
-To ensure consistency across subvolumes, replicate holds a lock whenever a modificatio
-n is being made to a file or directory.
- By default, replicate considers the first subvolume as the sole lock server.
- However, the number of lock servers can be increased up to the total number
- of subvolumes.
-\end_layout
-
-\begin_layout Standard
-The change log is a set of extended attributes associated with files and
- directories that replicate maintains.
- The change log keeps track of the changes made to files and directories
- (data, metadata, entry) so that the self-heal algorithm knows which copy
- of a file or directory is the most recent one.
-\end_layout
-
-\begin_layout Subsection*
-Write algorithm
-\end_layout
-
-\begin_layout Standard
-The algorithm for all write operations (data, metadata, entry) is:
-\end_layout
-
-\begin_layout Enumerate
-Lock the file (or directory) on all of the lock servers (see options below).
-\end_layout
-
-\begin_layout Enumerate
-Write change log entries on all servers.
-\end_layout
-
-\begin_layout Enumerate
-Perform the operation.
-\end_layout
-
-\begin_layout Enumerate
-Erase change log entries.
-\end_layout
-
-\begin_layout Enumerate
-Unlock the file (or directory) on all of the lock servers.
-\end_layout
-
-\begin_layout Standard
-The above algorithm is a simplified version intended for general users.
- Please refer to the source code for the full details.
-\end_layout
-
-\begin_layout Subsection*
-Self-Heal
-\end_layout
-
-\begin_layout Standard
-replicate automatically tries to fix any inconsistencies it detects among different
- copies of a file.
- It uses information in the change log to determine which copy is the
-\begin_inset Quotes eld
-\end_inset
-
-correct
-\begin_inset Quotes erd
-\end_inset
-
- version.
-\end_layout
-
-\begin_layout Standard
-Self-heal is triggered when a file or directory is first
-\begin_inset Quotes eld
-\end_inset
-
-accessed
-\begin_inset Quotes erd
-\end_inset
-
-, that is, the first time any operation is attempted on it.
- The self-heal algorithm does the following things:
-\end_layout
-
-\begin_layout Standard
-If the entry being accessed is a directory:
-\end_layout
-
-\begin_layout Itemize
-The contents of the
-\begin_inset Quotes eld
-\end_inset
-
-correct
-\begin_inset Quotes erd
-\end_inset
-
- version is replicated on all subvolumes, by deleting entries and creating
- entries as necessary.
-\end_layout
-
-\begin_layout Standard
-If the entry being accessed is a file:
-\end_layout
-
-\begin_layout Itemize
-If the file does not exist on some subvolumes, it is created.
-\end_layout
-
-\begin_layout Itemize
-If there is a mismatch in the size of the file, or ownership, or permission,
- it is fixed.
-\end_layout
-
-\begin_layout Itemize
-If the change log indicates that some copies need updating, they are updated.
-\end_layout
-
-\begin_layout Subsection*
-Split-brain
-\end_layout
-
-\begin_layout Standard
-It may happen that one replicate client can access only some of the servers in
- a cluster and another replicate client can access the remaining servers.
- Or it may happen that in a cluster of two servers, one server goes down
- and comes back up, but the other goes down immediately.
- Both these scenarios result in a
-\begin_inset Quotes eld
-\end_inset
-
-split-brain
-\begin_inset Quotes erd
-\end_inset
-
-.
-\end_layout
-
-\begin_layout Standard
-In a split-brain situation, there will be two or more copies of a file,
- all of which are
-\begin_inset Quotes eld
-\end_inset
-
-correct
-\begin_inset Quotes erd
-\end_inset
-
- in some sense.
- replicate without manual intervention has no way of knowing what to do, since
- it cannot consider any single copy as definitive, nor does it know of any
- meaningful way to merge the copies.
-\end_layout
-
-\begin_layout Standard
-If replicate detects that a split-brain has happened on a file, it disallows opening
- of that file.
- You will have to manually resolve the conflict by deleting all but one
- copy of the file.
- Alternatively you can set an automatic split-brain resolution policy by
- using the `favorite-child' option (see below).
-\end_layout
-
-\begin_layout Section*
-Translator Options
-\end_layout
-
-\begin_layout Standard
-replicate accepts the following options:
-\end_layout
-
-\begin_layout Subsection*
-read-subvolume (default: none)
-\end_layout
-
-\begin_layout Standard
-The value of this option must be the name of a subvolume.
- If given, all read operations are sent to only the specified subvolume,
- instead of being balanced across all subvolumes.
-\end_layout
-
-\begin_layout Subsection*
-favorite-child (default: none)
-\end_layout
-
-\begin_layout Standard
-The value of this option must be the name of a subvolume.
- If given, the specified subvolume will be preferentially used in resolving
- conflicts (
-\begin_inset Quotes eld
-\end_inset
-
-split-brain
-\begin_inset Quotes erd
-\end_inset
-
-).
- This means if a discrepancy is noticed in the attributes or content of
- a file, the copy on the `favorite-child' will be considered the definitive
- version and its contents will
-\emph on
-overwrite
-\emph default
-the contents of all other copies.
- Use this option with caution! It is possible to
-\emph on
-lose data
-\emph default
- with this option.
- If you are in doubt, do not specify this option.
-\end_layout
-
-\begin_layout Subsection*
-Self-heal options
-\end_layout
-
-\begin_layout Standard
-Setting any of these options to
-\begin_inset Quotes eld
-\end_inset
-
-off
-\begin_inset Quotes erd
-\end_inset
-
- prevents that kind of self-heal from being done on a file or directory.
- For example, if metadata self-heal is turned off, permissions and ownership
- are no longer fixed automatically.
-\end_layout
-
-\begin_layout Subsubsection*
-data-self-heal (default: on)
-\end_layout
-
-\begin_layout Standard
-Enable/disable self-healing of file contents.
-\end_layout
-
-\begin_layout Subsubsection*
-metadata-self-heal (default: off)
-\end_layout
-
-\begin_layout Standard
-Enable/disable self-healing of metadata (permissions, ownership, modification
- times).
-\end_layout
-
-\begin_layout Subsubsection*
-entry-self-heal (default: on)
-\end_layout
-
-\begin_layout Standard
-Enable/disable self-healing of directory entries.
-\end_layout
-
-\begin_layout Subsection*
-Change Log options
-\end_layout
-
-\begin_layout Standard
-If any of these options is turned off, it disables writing of change log
- entries for that class of file operations.
- That is, steps 2 and 4 of the write algorithm (see above) are not done.
- Note that if the change log is not written, the self-heal algorithm cannot
- determine the
-\begin_inset Quotes eld
-\end_inset
-
-correct
-\begin_inset Quotes erd
-\end_inset
-
- version of a file and hence self-heal will only be able to fix
-\begin_inset Quotes eld
-\end_inset
-
-obviously
-\begin_inset Quotes erd
-\end_inset
-
- wrong things (such as a file existing on only one node).
-\end_layout
-
-\begin_layout Subsubsection*
-data-change-log (default: on)
-\end_layout
-
-\begin_layout Standard
-Enable/disable writing of change log for data operations.
-\end_layout
-
-\begin_layout Subsubsection*
-metadata-change-log (default: on)
-\end_layout
-
-\begin_layout Standard
-Enable/disable writing of change log for metadata operations.
-\end_layout
-
-\begin_layout Subsubsection*
-entry-change-log (default: on)
-\end_layout
-
-\begin_layout Standard
-Enable/disable writing of change log for entry operations.
-\end_layout
-
-\begin_layout Subsection*
-Locking options
-\end_layout
-
-\begin_layout Standard
-These options let you specify the number of lock servers to use for each
- class of file operations.
- The default values are satisfactory in most cases.
- If you are extra paranoid, you may want to increase the values.
- However, be very cautious if you set the data- or entry- lock server counts
- to zero, since this can result in
-\emph on
-lost data.
-
-\emph default
- For example, if you set the data-lock-server-count to zero, and two application
-s write to the same region of a file, there is a possibility that none of
- your servers will have all the data.
- In other words, the copies will be
-\emph on
-inconsistent
-\emph default
-, and
-\emph on
-incomplete
-\emph default
-.
- Do not set data- and entry- lock server counts to zero unless you absolutely
- know what you are doing and agree to not hold GlusterFS responsible for
- any lost data.
-\end_layout
-
-\begin_layout Subsubsection*
-data-lock-server-count (default: 1)
-\end_layout
-
-\begin_layout Standard
-Number of lock servers to use for data operations.
-\end_layout
-
-\begin_layout Subsubsection*
-metadata-lock-server-count (default: 0)
-\end_layout
-
-\begin_layout Standard
-Number of lock servers to use for metadata operations.
-\end_layout
-
-\begin_layout Subsubsection*
-entry-lock-server-count (default: 1)
-\end_layout
-
-\begin_layout Standard
-Number of lock servers to use for entry operations.
-\end_layout
-
-\begin_layout Section*
-Known Issues
-\end_layout
-
-\begin_layout Subsection*
-Self-heal of file with more than one link (hard links):
-\end_layout
-
-\begin_layout Standard
-Consider two servers, A and B.
- Assume A is down, and the user creates a file `new' as a hard link to a
- file `old'.
- When A comes back up, replicate will see that the file `new' does not exist on
- A, and self-heal will create the file and copy the contents from B.
- However, now on server A the file `new' is not a link to the file `old'
- but an entirely different file.
-\end_layout
-
-\begin_layout Standard
-We know of no easy way to fix this problem, but we will try to fix it in
- forthcoming releases.
-\end_layout
-
-\begin_layout Subsection*
-File re-opening after a server comes back up:
-\end_layout
-
-\begin_layout Standard
-If a server A goes down and comes back up, any files which were opened while
- A was down and are still open will not have their writes replicated on
- A.
- In other words, data replication only happens on those servers which were
- alive when the file was opened.
-\end_layout
-
-\begin_layout Standard
-This is a rather tricky issue but we hope to fix it very soon.
-\end_layout
-
-\begin_layout Section*
-Frequently Asked Questions
-\end_layout
-
-\begin_layout Subsection*
-1.
- How can I force self-heal to happen?
-\end_layout
-
-\begin_layout Standard
-You can force self-heal to happen on your cluster by running a script or
- a command that accesses every file.
- A simple way to do it would be:
-\end_layout
-
-\begin_layout Standard
-\begin_inset ERT
-status open
-
-\begin_layout Standard
-
-\end_layout
-
-\begin_layout Standard
-
-
-\backslash
-begin{verbatim}
-\end_layout
-
-\begin_layout Standard
-
-$ ls -lR
-\end_layout
-
-\begin_layout Standard
-
-
-\backslash
-end{verbatim}
-\end_layout
-
-\begin_layout Standard
-
-\end_layout
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Standard
-Run the command in all directories which you want to forcibly self-heal.
-\end_layout
-
-\begin_layout Subsection*
-2.
- Which backend filesystem should I use for replicate?
-\end_layout
-
-\begin_layout Standard
-You can use any backend filesystem that supports extended attributes.
- We know of users successfully using XFS, ReiserFS, and Ext3.
-\end_layout
-
-\begin_layout Subsection*
-3.
- What can I do to improve replicate performance?
-\end_layout
-
-\begin_layout Standard
-Try loading performance translators such as io-threads, write-behind, io-cache,
- and read-ahead depending on your workload.
- If you are willing to sacrifice correctness in corner cases, you can experiment
- with the lock-server-count and the change-log options (see above).
- As warned earlier, be very careful!
-\end_layout
-
-\begin_layout Subsection*
-4.
- How can I selectively replicate files?
-\end_layout
-
-\begin_layout Standard
-There is no support for selective replication in replicate itself.
- You can achieve selective replication by loading the unify translator over
- replicate, and using the switch scheduler.
- Configure unify with two subvolumes, one of them being replicate.
- Using the switch scheduler, schedule all files for which you need replication
- to the replicate subvolume.
- Consult unify and switch documentation for more details.
-\end_layout
-
-\begin_layout Section*
-Contact
-\end_layout
-
-\begin_layout Standard
-If you need more assistance on replicate, contact us on the mailing list <gluster-user
-s@gluster.org> (visit gluster.org for details on how to subscribe).
-\end_layout
-
-\begin_layout Standard
-Send your comments and suggestions about this document to <vikas@gluster.com>.
-\end_layout
-
-\end_body
-\end_document
diff --git a/doc/legacy/replicate.pdf b/doc/legacy/replicate.pdf
deleted file mode 100644
index b7212af2b4e..00000000000
--- a/doc/legacy/replicate.pdf
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/solaris-related-xattrs.txt b/doc/legacy/solaris-related-xattrs.txt
deleted file mode 100644
index fa41e29000b..00000000000
--- a/doc/legacy/solaris-related-xattrs.txt
+++ /dev/null
@@ -1,44 +0,0 @@
- Solaris Extended Attributes
-
-In Solaris, extended attributes are logically supported as files
-within the filesystem. The file system is therefore augmented
-with an orthogonal namespace of file attributes. Attribute values
-are accessed by file descriptors obtained through a special attribute
-interface. This type of logical view of "attributes as files" allows
-the leveraging of existing file system interface functionality to
-support the construction, deletion and manipulation of attributes.
-
-But while testing this functionality provided by Solaris,
-we have come across two major issues, as described below.
-
-1. XATTR_NOFOLLOW is not available for creating extended attributes
- directly on symlinks, as it is on other platforms like Linux, Mac OS X,
- BSD, etc. O_NOFOLLOW is implemented for the "openat()" call and sets
- errno to ELOOP whenever a symlink is encountered, while
- AT_SYMLINK_NOFOLLOW is not available for calls like "attropen()" and
- "openat()".
-
- a snippet of test code which helped us understand this behaviour
- --------------------------------------
- attrfd = attropen (path, key,
- flags|AT_SYMLINK_NOFOLLOW|O_CREAT|O_WRONLY|O_NOFOLLOW, 0777);
- if (attrfd >= 0) {
- ftruncate (attrfd, 0);
- ret = write (attrfd, value, size);
- close (attrfd);
- } else {
- fprintf (stderr, "Couldn't set extended attribute for %s (%d)\n",
- path, errno);
- }
- --------------------------------------
-
-2. Extended attributes are not supported for special files such as device
-   files and fifo files under Solaris.
-
-Apart from these glitches, almost everything needed to port the extended
-attribute calls has been implemented in compat.c by writing wrapper
-functions around "attropen()", "openat()" and "unlinkat()".
-
-
-
diff --git a/doc/legacy/stat-prefetch-design.txt b/doc/legacy/stat-prefetch-design.txt
deleted file mode 100644
index 68ed423d3dd..00000000000
--- a/doc/legacy/stat-prefetch-design.txt
+++ /dev/null
@@ -1,154 +0,0 @@
-what is stat-prefetch?
-======================
-It is a translator which caches the dentries read in readdir. This dentry
-list is stored in the context of the fd. Later, when a lookup happens on a
-[parent-inode, basename (path)] combination, this list is searched for the
-basename. The dentry thus found is used to fill in the stat corresponding
-to the path being looked up, thereby short-cutting lookup calls. This cache
-is preserved until closedir is called on the fd. The purpose of this
-translator is to optimize operations like 'ls -l', where a readdir is
-followed by lookup (stat) calls on each directory entry.
-
-1. stat-prefetch harnesses the efficiency of short lookup calls
- (saves network roundtrip time for lookup calls from being accounted to
- the stat call).
-2. To maintain correctness, it does lookup-behind - the lookup is wound to
-   the underlying translators after it is unwound to the upper translators.
-   lookup-behind is necessary because the inode gets populated in the server
-   inode table only in lookup-cbk, and also because various translators store
-   their contexts in inode contexts during lookup calls.
-
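-For illustration, a minimal sketch of loading this translator in a volume
-specification file might look like the following (the volume names are
-hypothetical; performance/stat-prefetch is assumed to be the translator type):
-
-  volume sp
-    type performance/stat-prefetch    # assumed type name
-    subvolumes client0                # subvolume whose lookups get short-cut
-  end-volume
-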
-fops to be implemented:
-=======================
-* lookup
- 1. check the dentry cache stored in context of fds opened by the same process
- on parent inode for basename. If found unwind with cached stat, else wind
- the lookup call to underlying translators.
- 2. stat is stored in the context of inode if the path being looked upon
- happens to be directory. This stat will be used to fill postparent stat
- when lookup happens on any of the directory contents.
-
-* readdir
- 1. cache the direntries returned in readdir_cbk in the context of fd.
-  2. if the readdir is happening at unexpected offsets (meaning a seekdir/rewinddir
-     has happened), the cache has to be flushed.
- 3. delete the entry corresponding to basename of path on which fd is opened
- from cache stored in parent.
-
-* chmod/fchmod
- delete the entry corresponding to basename from cache stored in context of
- fds opened on parent inode, since these calls change st_mode and st_ctime of
- stat.
-
-* chown/fchown
- delete the entry corresponding to basename from cache stored in context of
- fds opened on parent inode, since these calls change st_uid/st_gid and
- st_ctime of stat.
-
-* truncate/ftruncate
- delete the entry corresponding to basename from cache stored in context of
- fds opened on parent inode, since these calls change st_size/st_mtime of stat.
-
-* utimens
- delete the entry corresponding to basename from cache stored in context of
- fds opened on parent inode, since this call changes st_atime/st_mtime of stat.
-
-* readlink
- delete the entry corresponding to basename from cache stored in context of fds
- opened on parent inode, since this call changes st_atime of stat.
-
-* unlink
- 1. delete the entry corresponding to basename from cache stored in context of
- fds, opened on parent directory containing the file being unlinked.
- 2. delete the entry corresponding to basename of parent directory from cache
- of grand-parent.
-
-* rmdir
- 1. delete the entry corresponding to basename from cache stored in context of
- fds opened on parent inode.
- 2. remove the entire cache from all fds opened on inode corresponding to
- directory being removed.
-  3. delete the entry corresponding to basename of parent from cache stored in
- grand-parent.
-
-* readv
- delete the entry corresponding to basename from cache stored in context of fds
- opened on parent inode, since readv changes st_atime of file.
-
-* writev
- delete the entry corresponding to basename from cache stored in context of fds
- opened on parent inode, since writev can possibly change st_size and definitely
- changes st_mtime of file.
-
-* fsync
-  there is some confusion here as to whether fsync updates mtime/ctime. Disk
-  based filesystems (at least ext2) just write the times stored in the inode
-  to disk during fsync, not the time at which the fsync is done. But in glusterfs,
- a translator like write-behind actually sends writes during fsync which will
- change mtime/ctime. Hence stat-prefetch implements fsync to delete the entry
- corresponding to basename from cache stored in context of fds opened on parent
- inode.
-
-* rename
- 1. remove entry corresponding to oldname from cache stored in fd contexts of
- oldparent.
- 2. remove entry corresponding to newname from cache stored in fd contexts of
- newparent.
- 3. remove entry corresponding to oldparent from cache stored in
- old-grand-parent, since removing oldname changes st_mtime and st_ctime
- of oldparent stat.
- 4. remove entry corresponding to newparent from cache stored in
- new-grand-parent, since adding newname changes st_mtime and st_ctime
- of newparent stat.
- 5. if oldname happens to be a directory, remove entire cache from all fds
- opened on it.
-
-* create/mknod/mkdir/symlink/link
- delete entry corresponding to basename of parent directory in which these
- operations are happening, from cache stored in context of fds opened on
- grand-parent, since adding a new entry to a directory changes st_mtime
- and st_ctime of parent directory.
-
-* setxattr/removexattr
- delete the entry corresponding to basename from cache stored in context of
- fds opened on parent inode, since setxattr changes st_ctime of file.
-
-* setdents
- 1. remove entry corresponding to basename of path on which fd is opened from
- cache stored in context of fds opened on parent.
- 2. for each of the entry in the direntry list, delete from cache stored in
- context of fd, the entry corresponding to basename of path being passed.
-
-* getdents
- 1. remove entry corresponding to basename of path on which fd is opened from
- cache stored in parent, since getdents changes st_atime.
- 2. remove entries corresponding to symbolic links from cache, since readlink
- would've changed st_atime.
-
-* checksum
- delete the entry corresponding to basename from cache stored in context of
- fds opened on parent inode, since st_atime is changed during this call.
-
-* xattrop/fxattrop
- delete the entry corresponding to basename from cache stored in context of fds
- opened on parent inode, since these calls modify st_ctime of file.
-
-callbacks to be implemented:
-============================
-* releasedir
- free the context stored in fd.
-
-* forget
-  free the stat if the inode corresponds to a directory.
-
-limitations:
-============
-* since a readdir does not return extended attributes of file, if need_xattr is
- set, short-cutting of lookup does not happen and lookup is passed to
- underlying translators.
-
-* posix_readdir does not check whether the dentries span multiple mount
-  points. Hence it does not transform inode numbers in stat buffers if posix
-  is configured to allow the export directory to span multiple mountpoints.
-  This is a bug which needs to be fixed: posix_readdir should treat dentries
-  the same way as if a lookup were happening on them.
diff --git a/doc/legacy/stripe.odg b/doc/legacy/stripe.odg
deleted file mode 100644
index 79441bf1452..00000000000
--- a/doc/legacy/stripe.odg
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/stripe.pdf b/doc/legacy/stripe.pdf
deleted file mode 100644
index b94446feb56..00000000000
--- a/doc/legacy/stripe.pdf
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/translator-options.txt b/doc/legacy/translator-options.txt
deleted file mode 100644
index 3422c058a5d..00000000000
--- a/doc/legacy/translator-options.txt
+++ /dev/null
@@ -1,224 +0,0 @@
-mount/fuse:
- * direct-io-mode GF_OPTION_TYPE_BOOL on|off|yes|no
- * mount-point (mountpoint) GF_OPTION_TYPE_PATH <any-posix-valid-path>
- * attribute-timeout GF_OPTION_TYPE_DOUBLE 0.0
- * entry-timeout GF_OPTION_TYPE_DOUBLE 0.0
-
-protocol/server:
- * transport-type GF_OPTION_TYPE_STR tcp|socket|ib-verbs|unix|ib-sdp|
- tcp/client|ib-verbs/client
- * volume-filename.* GF_OPTION_TYPE_PATH
- * inode-lru-limit GF_OPTION_TYPE_INT 0-(1 * GF_UNIT_MB)
- * client-volume-filename GF_OPTION_TYPE_PATH
-
-protocol/client:
- * username GF_OPTION_TYPE_ANY
- * password GF_OPTION_TYPE_ANY
- * transport-type GF_OPTION_TYPE_STR tcp|socket|ib-verbs|unix|ib-sdp|
- tcp/client|ib-verbs/client
- * remote-host GF_OPTION_TYPE_ANY
- * remote-subvolume GF_OPTION_TYPE_ANY
- * transport-timeout GF_OPTION_TYPE_TIME 5-1013
-
-cluster/replicate:
- * read-subvolume GF_OPTION_TYPE_XLATOR
- * favorite-child GF_OPTION_TYPE_XLATOR
- * data-self-heal GF_OPTION_TYPE_BOOL
- * metadata-self-heal GF_OPTION_TYPE_BOOL
- * entry-self-heal GF_OPTION_TYPE_BOOL
- * data-change-log GF_OPTION_TYPE_BOOL
- * metadata-change-log GF_OPTION_TYPE_BOOL
- * entry-change-log GF_OPTION_TYPE_BOOL
- * data-lock-server-count GF_OPTION_TYPE_INT 0
- * metadata-lock-server-count GF_OPTION_TYPE_INT 0
- * entry-lock-server-count GF_OPTION_TYPE_INT 0
-
-cluster/distribute:
- * lookup-unhashed GF_OPTION_TYPE_BOOL
-
-cluster/unify:
- * namespace GF_OPTION_TYPE_XLATOR
- * scheduler GF_OPTION_TYPE_STR alu|rr|random|nufa|switch
- * self-heal GF_OPTION_TYPE_STR foreground|background|off
- * optimist GF_OPTION_TYPE_BOOL
-
-cluster/nufa:
- * local-volume-name GF_OPTION_TYPE_XLATOR
-
-cluster/stripe:
- * block-size GF_OPTION_TYPE_ANY
- * use-xattr GF_OPTION_TYPE_BOOL
-
-debug/trace:
- * include-ops (include) GF_OPTION_TYPE_STR
- * exclude-ops (exclude) GF_OPTION_TYPE_STR
-
-encryption/rot-13:
- * encrypt-write GF_OPTION_TYPE_BOOL
- * decrypt-read GF_OPTION_TYPE_BOOL
-
-features/path-convertor:
- * start-offset GF_OPTION_TYPE_INT 0-4095
- * end-offset GF_OPTION_TYPE_INT 1-4096
- * replace-with GF_OPTION_TYPE_ANY
-
-features/trash:
- * trash-dir GF_OPTION_TYPE_PATH
-
-features/locks:
- * mandatory-locks (mandatory) GF_OPTION_TYPE_BOOL
-
-features/filter:
- * root-squashing GF_OPTION_TYPE_BOOL
- * read-only GF_OPTION_TYPE_BOOL
- * fixed-uid GF_OPTION_TYPE_INT
- * fixed-gid GF_OPTION_TYPE_INT
- * translate-uid GF_OPTION_TYPE_ANY
- * translate-gid GF_OPTION_TYPE_ANY
- * filter-uid GF_OPTION_TYPE_ANY
- * filter-gid GF_OPTION_TYPE_ANY
-
-features/quota:
- * min-free-disk-limit GF_OPTION_TYPE_PERCENT
- * refresh-interval GF_OPTION_TYPE_TIME
- * disk-usage-limit GF_OPTION_TYPE_SIZET
-
-storage/posix:
- * o-direct GF_OPTION_TYPE_BOOL
- * directory GF_OPTION_TYPE_PATH
- * export-statfs-size GF_OPTION_TYPE_BOOL
- * mandate-attribute GF_OPTION_TYPE_BOOL
-
-storage/bdb:
- * directory GF_OPTION_TYPE_PATH
- * logdir GF_OPTION_TYPE_PATH
- * errfile GF_OPTION_TYPE_PATH
- * dir-mode GF_OPTION_TYPE_ANY
- * file-mode GF_OPTION_TYPE_ANY
- * page-size GF_OPTION_TYPE_SIZET
- * lru-limit GF_OPTION_TYPE_INT
- * lock-timeout GF_OPTION_TYPE_TIME
- * checkpoint-timeout GF_OPTION_TYPE_TIME
- * transaction-timeout GF_OPTION_TYPE_TIME
- * mode GF_OPTION_TYPE_BOOL
- * access-mode GF_OPTION_TYPE_STR
-
-performance/read-ahead:
- * force-atime-update GF_OPTION_TYPE_BOOL
- * page-size GF_OPTION_TYPE_SIZET (64 * GF_UNIT_KB)-(2 * GF_UNIT_MB)
- * page-count GF_OPTION_TYPE_INT 1-16
-
-performance/write-behind:
- * flush-behind GF_OPTION_TYPE_BOOL
- * aggregate-size GF_OPTION_TYPE_SIZET (128 * GF_UNIT_KB)-(4 * GF_UNIT_MB)
- * window-size GF_OPTION_TYPE_SIZET (512 * GF_UNIT_KB)-(1 * GF_UNIT_GB)
- * enable-O_SYNC GF_OPTION_TYPE_BOOL
- * disable-for-first-nbytes GF_OPTION_TYPE_SIZET 1 - (1 * GF_UNIT_MB)
-
-performance/symlink-cache:
-
-performance/io-threads:
- * thread-count GF_OPTION_TYPE_INT 1-32
-
-performance/io-cache:
- * priority GF_OPTION_TYPE_ANY
- * cache-timeout (force-revalidate-timeout) GF_OPTION_TYPE_INT 0-60
- * page-size GF_OPTION_TYPE_SIZET (16 * GF_UNIT_KB)-(4 * GF_UNIT_MB)
- * cache-size GF_OPTION_TYPE_SIZET (4 * GF_UNIT_MB)-(6 * GF_UNIT_GB)
-
-performance/quick-read:
- * cache-timeout GF_OPTION_TYPE_INT 1-60
- * max-file-size GF_OPTION_TYPE_SIZET 0-(1000 * GF_UNIT_KB)
-
-auth:
-- addr:
- * auth.addr.*.allow GF_OPTION_TYPE_ANY
- * auth.addr.*.reject GF_OPTION_TYPE_ANY
-
-- login:
- * auth.login.*.allow GF_OPTION_TYPE_ANY
- * auth.login.*.password GF_OPTION_TYPE_ANY
-
-scheduler/alu:
- * scheduler.alu.order (alu.order)
- GF_OPTION_TYPE_ANY
- * scheduler.alu.disk-usage.entry-threshold (alu.disk-usage.entry-threshold)
- GF_OPTION_TYPE_SIZET
- * scheduler.alu.disk-usage.exit-threshold (alu.disk-usage.exit-threshold)
- GF_OPTION_TYPE_SIZET
- * scheduler.alu.write-usage.entry-threshold (alu.write-usage.entry-threshold)
- GF_OPTION_TYPE_SIZET
- * scheduler.alu.write-usage.exit-threshold (alu.write-usage.exit-threshold)
- GF_OPTION_TYPE_SIZET
- * scheduler.alu.read-usage.entry-threshold (alu.read-usage.entry-threshold)
- GF_OPTION_TYPE_SIZET
- * scheduler.alu.read-usage.exit-threshold (alu.read-usage.exit-threshold)
- GF_OPTION_TYPE_SIZET
- * scheduler.alu.open-files-usage.entry-threshold (alu.open-files-usage.entry-threshold)
- GF_OPTION_TYPE_INT
- * scheduler.alu.open-files-usage.exit-threshold (alu.open-files-usage.exit-threshold)
- GF_OPTION_TYPE_INT
- * scheduler.read-only-subvolumes (alu.read-only-subvolumes)
- GF_OPTION_TYPE_ANY
- * scheduler.refresh-interval (alu.refresh-interval)
- GF_OPTION_TYPE_TIME
- * scheduler.limits.min-free-disk (alu.limits.min-free-disk)
- GF_OPTION_TYPE_PERCENT
- * scheduler.alu.stat-refresh.num-file-create (alu.stat-refresh.num-file-create)
- GF_OPTION_TYPE_INT
-
-scheduler/nufa:
- * scheduler.refresh-interval (nufa.refresh-interval)
- GF_OPTION_TYPE_TIME
- * scheduler.limits.min-free-disk (nufa.limits.min-free-disk)
- GF_OPTION_TYPE_PERCENT
- * scheduler.local-volume-name (nufa.local-volume-name)
- GF_OPTION_TYPE_XLATOR
-
-scheduler/random:
- * scheduler.refresh-interval (random.refresh-interval) GF_OPTION_TYPE_TIME
- * scheduler.limits.min-free-disk (random.limits.min-free-disk) GF_OPTION_TYPE_PERCENT
-
-scheduler/rr:
- * scheduler.refresh-interval (rr.refresh-interval) GF_OPTION_TYPE_TIME
- * scheduler.limits.min-free-disk (rr.limits.min-free-disk) GF_OPTION_TYPE_PERCENT
- * scheduler.read-only-subvolumes (rr.read-only-subvolumes) GF_OPTION_TYPE_ANY
-
-scheduler/switch:
- * scheduler.read-only-subvolumes (switch.read-only-subvolumes) GF_OPTION_TYPE_ANY
- * scheduler.local-volume-name (switch.nufa.local-volume-name) GF_OPTION_TYPE_XLATOR
- * scheduler.switch.case (switch.case) GF_OPTION_TYPE_ANY
-
-transport/ib-verbs:
- * transport.ib-verbs.port (ib-verbs-port) GF_OPTION_TYPE_INT 1-4
- check the option by 'ibv_devinfo'
- * transport.ib-verbs.mtu (ib-verbs-mtu) GF_OPTION_TYPE_INT
- * transport.ib-verbs.device-name (ib-verbs-device-name) GF_OPTION_TYPE_ANY,
- check by 'ibv_devinfo'
- * transport.ib-verbs.work-request-send-size (ib-verbs-work-request-send-size)
- GF_OPTION_TYPE_INT,
- * transport.ib-verbs.work-request-recv-size (ib-verbs-work-request-recv-size)
- GF_OPTION_TYPE_INT
- * transport.ib-verbs.work-request-send-count (ib-verbs-work-request-send-count)
- GF_OPTION_TYPE_INT
- * transport.ib-verbs.work-request-recv-count (ib-verbs-work-request-recv-count)
- GF_OPTION_TYPE_INT
- * remote-port (transport.remote-port,transport.ib-verbs.remote-port)
- GF_OPTION_TYPE_INT
- * transport.ib-verbs.listen-port GF_OPTION_TYPE_INT
- * transport.ib-verbs.connect-path (connect-path) GF_OPTION_TYPE_ANY
- * transport.ib-verbs.bind-path (bind-path) GF_OPTION_TYPE_ANY
- * transport.ib-verbs.listen-path (listen-path) GF_OPTION_TYPE_ANY
- * transport.address-family (address-family) GF_OPTION_TYPE_STR inet|inet6|inet/inet6|
- inet6/inet|unix|inet-sdp
-
-transport/socket:
- * transport.remote-port (remote-port,transport.socket.remote-port) GF_OPTION_TYPE_INT
- * transport.socket.listen-port (listen-port) GF_OPTION_TYPE_INT
- * transport.socket.bind-address (bind-address) GF_OPTION_TYPE_ANY
- * transport.socket.connect-path (connect-path) GF_OPTION_TYPE_ANY
- * transport.socket.bind-path (bind-path) GF_OPTION_TYPE_ANY
- * transport.socket.listen-path (listen-path) GF_OPTION_TYPE_ANY
- * transport.address-family (address-family) GF_OPTION_TYPE_STR inet|inet6|
- inet/inet6|inet6/inet|
- unix|inet-sdp
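-
-For illustration, an option from this reference is written in a volume
-specification file as a plain "option <name> <value>" line; the volume and
-subvolume names below are hypothetical:
-
-  volume ioc
-    type performance/io-cache
-    option cache-size 64MB       # within the listed 4MB-6GB range
-    option cache-timeout 2       # within the listed 0-60 range
-    subvolumes client0
-  end-volume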
diff --git a/doc/legacy/unify.odg b/doc/legacy/unify.odg
deleted file mode 100644
index ccaa9bf16f9..00000000000
--- a/doc/legacy/unify.odg
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/unify.pdf b/doc/legacy/unify.pdf
deleted file mode 100644
index c22027f66e7..00000000000
--- a/doc/legacy/unify.pdf
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/user-guide.info b/doc/legacy/user-guide.info
deleted file mode 100644
index eae0ef10b58..00000000000
--- a/doc/legacy/user-guide.info
+++ /dev/null
@@ -1,2697 +0,0 @@
-This is ../../../doc/user-guide/user-guide.info, produced by makeinfo version 4.13 from ../../../doc/user-guide/user-guide.texi.
-
-START-INFO-DIR-ENTRY
-* GlusterFS: (user-guide). GlusterFS distributed filesystem user guide
-END-INFO-DIR-ENTRY
-
- This is the user manual for GlusterFS 2.0.
-
- Copyright (c) 2007-2011 Gluster, Inc. Permission is granted to
-copy, distribute and/or modify this document under the terms of the GNU
-Free Documentation License, Version 1.2 or any later version published
-by the Free Software Foundation; with no Invariant Sections, no
-Front-Cover Texts, and no Back-Cover Texts. A copy of the license is
-included in the chapter entitled "GNU Free Documentation License".
-
-
-File: user-guide.info, Node: Top, Next: Acknowledgements, Up: (dir)
-
-GlusterFS 2.0 User Guide
-************************
-
-This is the user manual for GlusterFS 2.0.
-
- Copyright (c) 2007-2011 Gluster, Inc. Permission is granted to
-copy, distribute and/or modify this document under the terms of the GNU
-Free Documentation License, Version 1.2 or any later version published
-by the Free Software Foundation; with no Invariant Sections, no
-Front-Cover Texts, and no Back-Cover Texts. A copy of the license is
-included in the chapter entitled "GNU Free Documentation License".
-
-* Menu:
-
-* Acknowledgements::
-* Introduction::
-* Installation and Invocation::
-* Concepts::
-* Translators::
-* Usage Scenarios::
-* Troubleshooting::
-* GNU Free Documentation Licence::
-* Index::
-
- --- The Detailed Node Listing ---
-
-Installation and Invocation
-
-* Pre requisites::
-* Getting GlusterFS::
-* Building::
-* Running GlusterFS::
-* A Tutorial Introduction::
-
-Running GlusterFS
-
-* Server::
-* Client::
-
-Concepts
-
-* Filesystems in Userspace::
-* Translator::
-* Volume specification file::
-
-Translators
-
-* Storage Translators::
-* Client and Server Translators::
-* Clustering Translators::
-* Performance Translators::
-* Features Translators::
-
-Storage Translators
-
-* POSIX::
-
-Client and Server Translators
-
-* Transport modules::
-* Client protocol::
-* Server protocol::
-
-Clustering Translators
-
-* Unify::
-* Replicate::
-* Stripe::
-
-Performance Translators
-
-* Read Ahead::
-* Write Behind::
-* IO Threads::
-* IO Cache::
-
-Features Translators
-
-* POSIX Locks::
-* Fixed ID::
-
-Miscellaneous Translators
-
-* ROT-13::
-* Trace::
-
-
-File: user-guide.info, Node: Acknowledgements, Next: Introduction, Prev: Top, Up: Top
-
-Acknowledgements
-****************
-
-GlusterFS continues to be a wonderful and enriching experience for all
-of us involved.
-
- GlusterFS development would not have been possible at this pace if
-not for our enthusiastic users. People from around the world have
-helped us with bug reports, performance numbers, and feature
-suggestions. A huge thanks to them all.
-
- Matthew Paine - for RPMs & general enthusiasm
-
- Leonardo Rodrigues de Mello - for DEBs
-
- Julian Perez & Adam D'Auria - for multi-server tutorial
-
- Paul England - for HA spec
-
- Brent Nelson - for many bug reports
-
- Jacques Mattheij - for Europe mirror.
-
- Patrick Negri - for TCP non-blocking connect.
- http://gluster.org/core-team.php (<list-hacking@gluster.com>)
- Gluster
-
-
-File: user-guide.info, Node: Introduction, Next: Installation and Invocation, Prev: Acknowledgements, Up: Top
-
-1 Introduction
-**************
-
-GlusterFS is a distributed filesystem. It works at the file level, not
-block level.
-
- A network filesystem is one which allows us to access remote files. A
-distributed filesystem is one that stores data on multiple machines and
-makes them all appear to be a part of the same filesystem.
-
- Need for distributed filesystems
-
- * Scalability: A distributed filesystem allows us to store more data
- than what can be stored on a single machine.
-
- * Redundancy: We might want to replicate crucial data on to several
- machines.
-
- * Uniform access: One can mount a remote volume (for example your
- home directory) from any machine and access the same data.
-
-1.1 Contacting us
-=================
-
-You can reach us through the mailing list *gluster-devel*
-(<gluster-devel@nongnu.org>).
-
- You can also find many of the developers on IRC, on the `#gluster'
-channel on Freenode (<irc.freenode.net>).
-
- The GlusterFS documentation wiki is also useful:
-<http://gluster.org/docs/index.php/GlusterFS>
-
- For commercial support, you can contact Gluster at:
-
- 3194 Winding Vista Common
- Fremont, CA 94539
- USA.
-
- Phone: +1 (510) 354 6801
- Toll free: +1 (888) 813 6309
- Fax: +1 (510) 372 0604
-
- You can also email us at <support@gluster.com>.
-
-
-File: user-guide.info, Node: Installation and Invocation, Next: Concepts, Prev: Introduction, Up: Top
-
-2 Installation and Invocation
-*****************************
-
-* Menu:
-
-* Pre requisites::
-* Getting GlusterFS::
-* Building::
-* Running GlusterFS::
-* A Tutorial Introduction::
-
-
-File: user-guide.info, Node: Pre requisites, Next: Getting GlusterFS, Up: Installation and Invocation
-
-2.1 Pre requisites
-==================
-
-Before installing GlusterFS make sure you have the following components
-installed.
-
-2.1.1 FUSE
-----------
-
-You'll need FUSE version 2.6.0 or higher to use GlusterFS. You can omit
-installing FUSE if you want to build _only_ the server. Note that you
-won't be able to mount a GlusterFS filesystem on a machine that does
-not have FUSE installed.
-
- FUSE can be downloaded from: <http://fuse.sourceforge.net/>
-
- To get the best performance from GlusterFS, however, it is
-recommended that you use our patched version of FUSE. See Patched FUSE
-for details.
-
-2.1.2 Patched FUSE
-------------------
-
-The GlusterFS project maintains a patched version of FUSE meant to be
-used with GlusterFS. The patches increase GlusterFS performance. It is
-recommended that all users use the patched FUSE.
-
- The patched FUSE tarball can be downloaded from:
-
- <ftp://ftp.gluster.com/pub/gluster/glusterfs/fuse/>
-
- The specific changes made to FUSE are:
-
- * The communication channel size between FUSE kernel module and
- GlusterFS has been increased to 1MB, permitting large reads and
- writes to be sent in bigger chunks.
-
- * The kernel's read-ahead boundary has been extended up to 1MB.
-
- * Block size returned in the `stat()'/`fstat()' calls tuned to 1MB,
- to make cp and similar commands perform I/O using that block size.
-
- * `flock()' locking support has been added (although some rework in
- GlusterFS is needed for perfect compliance).
-
-2.1.3 libibverbs (optional)
----------------------------
-
-This is only needed if you want GlusterFS to use InfiniBand as the
-interconnect mechanism between server and client. You can get it from:
-
- <http://www.openfabrics.org/downloads.htm>.
-
-2.1.4 Bison and Flex
---------------------
-
-These should be already installed on most Linux systems. If not, use
-your distribution's normal software installation procedures to install
-them. Make sure you install the relevant developer packages also.
-
-
-File: user-guide.info, Node: Getting GlusterFS, Next: Building, Prev: Pre requisites, Up: Installation and Invocation
-
-2.2 Getting GlusterFS
-=====================
-
-There are many ways to get hold of GlusterFS. For a production
-deployment, the recommended method is to download the latest release
-tarball. Release tarballs are available at:
-<http://gluster.org/download.php>.
-
- If you want the bleeding edge development source, you can get them
-from the GNU Arch(1) repository. First you must install GNU Arch
-itself. Then register the GlusterFS archive by doing:
-
- $ tla register-archive http://arch.sv.gnu.org/archives/gluster
-
- Now you can check out the source itself:
-
- $ tla get -A gluster@sv.gnu.org glusterfs--mainline--3.0
-
- ---------- Footnotes ----------
-
- (1) <http://www.gnu.org/software/gnu-arch/>
-
-
-File: user-guide.info, Node: Building, Next: Running GlusterFS, Prev: Getting GlusterFS, Up: Installation and Invocation
-
-2.3 Building
-============
-
-You can skip this section if you're installing from RPMs or DEBs.
-
- GlusterFS uses the Autotools mechanism to build. As such, the
-procedure is straight-forward. First, change into the GlusterFS source
-directory.
-
- $ cd glusterfs-<version>
-
- If you checked out the source from the Arch repository, you'll need
-to run `./autogen.sh' first. Note that you'll need to have Autoconf and
-Automake installed for this.
-
- Run `configure'.
-
- $ ./configure
-
- The configure script accepts the following options:
-
-`--disable-ibverbs'
- Disable the InfiniBand transport mechanism.
-
-`--disable-fuse-client'
- Disable the FUSE client.
-
-`--disable-server'
- Disable building of the GlusterFS server.
-
-`--disable-bdb'
- Disable building of Berkeley DB based storage translator.
-
-`--disable-mod_glusterfs'
- Disable building of Apache/lighttpd glusterfs plugins.
-
-`--disable-epoll'
- Use poll instead of epoll.
-
-`--disable-libglusterfsclient'
- Disable building of libglusterfsclient
-
-
- Build and install GlusterFS.
-
- # make install
-
- The binaries (`glusterfsd' and `glusterfs') will be by default
-installed in `/usr/local/sbin/'. Translator, scheduler, and transport
-shared libraries will be installed in
-`/usr/local/lib/glusterfs/<version>/'. Sample volume specification
-files will be in `/usr/local/etc/glusterfs/'. This document itself can
-be found in `/usr/local/share/doc/glusterfs/'. If you passed the
-`--prefix' argument to the configure script, then replace `/usr/local'
-in the preceding paths with the prefix.
-
-
-File: user-guide.info, Node: Running GlusterFS, Next: A Tutorial Introduction, Prev: Building, Up: Installation and Invocation
-
-2.4 Running GlusterFS
-=====================
-
-* Menu:
-
-* Server::
-* Client::
-
-
-File: user-guide.info, Node: Server, Next: Client, Up: Running GlusterFS
-
-2.4.1 Server
-------------
-
-The GlusterFS server is necessary to export storage volumes to remote
-clients (See *note Server protocol:: for more info). This section
-documents the invocation of the GlusterFS server program and all the
-command-line options accepted by it.
-
- Basic Options
-
-`-f, --volfile=<path>'
- Use the volume file as the volume specification.
-
-`-s, --volfile-server=<hostname>'
-     Server to get the volume file from. This option overrides the
-     --volfile option.
-
-`-l, --log-file=<path>'
- Specify the path for the log file.
-
-`-L, --log-level=<level>'
- Set the log level for the server. Log level should be one of DEBUG,
- WARNING, ERROR, CRITICAL, or NONE.
-
- Advanced Options
-
-`--debug'
-     Run in debug mode. This option sets --no-daemon, --log-level to
-     DEBUG and --log-file to console.
-
-`-N, --no-daemon'
- Run glusterfsd as a foreground process.
-
-`-p, --pid-file=<path>'
- Path for the PID file.
-
-`--volfile-id=<key>'
- 'key' of the volfile to be fetched from server.
-
-`--volfile-server-port=<port-number>'
- Listening port number of volfile server.
-
-`--volfile-server-transport=[tcp|ib-verbs]'
- Transport type to get volfile from server. [default: `tcp']
-
-`--xlator-options=<volume-name.option=value>'
- Add/override a translator option for a volume with specified value.
-
- Miscellaneous Options
-
-`-?, --help'
- Show this help text.
-
-`--usage'
- Display a short usage message.
-
-`-V, --version'
- Show version information.
-
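- For illustration (the paths are placeholders), a typical invocation
-combining the basic options above might be:
-
-     # glusterfsd -f /path/to/glusterfsd.vol \
-         -l /path/to/glusterfsd.log -L WARNING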
-
-File: user-guide.info, Node: Client, Prev: Server, Up: Running GlusterFS
-
-2.4.2 Client
-------------
-
-The GlusterFS client process is necessary to access remote storage
-volumes and mount them locally using FUSE. This section documents the
-invocation of the client process and all its command-line arguments.
-
- # glusterfs [options] <mountpoint>
-
- The `mountpoint' is the directory where you want the GlusterFS
-filesystem to appear. Example:
-
- # glusterfs -f /usr/local/etc/glusterfs-client.vol /mnt
-
- The command-line options are detailed below.
-
- Basic Options
-
-`-f, --volfile=<path>'
- Use the volume file as the volume specification.
-
-`-s, --volfile-server=<hostname>'
-     Server to get the volume file from. This option overrides the
-     --volfile option.
-
-`-l, --log-file=<path>'
- Specify the path for the log file.
-
-`-L, --log-level=<level>'
-     Set the log level for the client. Log level should be one of DEBUG,
- WARNING, ERROR, CRITICAL, or NONE.
-
- Advanced Options
-
-`--debug'
-     Run in debug mode. This option sets --no-daemon, --log-level to
-     DEBUG and --log-file to console.
-
-`-N, --no-daemon'
- Run `glusterfs' as a foreground process.
-
-`-p, --pid-file=<path>'
- Path for the PID file.
-
-`--volfile-id=<key>'
- 'key' of the volfile to be fetched from server.
-
-`--volfile-server-port=<port-number>'
- Listening port number of volfile server.
-
-`--volfile-server-transport=[tcp|ib-verbs]'
- Transport type to get volfile from server. [default: `tcp']
-
-`--xlator-options=<volume-name.option=value>'
- Add/override a translator option for a volume with specified value.
-
-`--volume-name=<volume name>'
- Volume name in client spec to use. Defaults to the root volume.
-
- FUSE Options
-
-`--attribute-timeout=<n>'
- Attribute timeout for inodes in the kernel, in seconds. Defaults
- to 1 second.
-
-`--disable-direct-io-mode'
- Disable direct I/O mode in FUSE kernel module.
-
-`-e, --entry-timeout=<n>'
- Entry timeout for directory entries in the kernel, in seconds.
- Defaults to 1 second.
-
-  Miscellaneous Options
-
-`-?, --help'
- Show this help information.
-
-`-V, --version'
- Show version information.
-
-
-File: user-guide.info, Node: A Tutorial Introduction, Prev: Running GlusterFS, Up: Installation and Invocation
-
-2.5 A Tutorial Introduction
-===========================
-
-This section will show you how to quickly get GlusterFS up and running.
-We'll configure GlusterFS as a simple network filesystem, with one
-server and one client. In this mode of usage, GlusterFS can serve as a
-replacement for NFS.
-
- We'll make use of two machines; call them _server_ and _client_ (If
-you don't want to set up two machines, just run everything that follows
-on the same machine). In the examples that follow, the shell prompts
-will use these names to clarify the machine on which the command is
-being run. For example, a command that should be run on the server will
-be shown with the prompt:
-
- [root@server]#
-
- Our goal is to make a directory on the _server_ (say, `/export')
-accessible to the _client_.
-
- First of all, get GlusterFS installed on both the machines, as
-described in the previous sections. Make sure you have the FUSE kernel
-module loaded. You can ensure this by running:
-
- [root@server]# modprobe fuse
-
- Before we can run the GlusterFS client or server programs, we need
-to write two files called _volume specifications_ (equivalently referred
-to as _volfiles_). The volfile describes the _translator tree_ on a
-node. The next chapter will explain the concepts of `translator' and
-`volume specification' in detail. For now, just assume that the volfile
-is like an NFS `/etc/exports' file.
-
- On the server, create a text file somewhere (we'll assume the path
-`/tmp/glusterfsd.vol') with the following contents.
-
- volume colon-o
- type storage/posix
- option directory /export
- end-volume
-
- volume server
- type protocol/server
- subvolumes colon-o
- option transport-type tcp
- option auth.addr.colon-o.allow *
- end-volume
-
- A brief explanation of the file's contents. The first section
-defines a storage volume, named "colon-o" (the volume names are
-arbitrary), which exports the `/export' directory. The second section
-defines options for the translator which will make the storage volume
-accessible remotely. It specifies `colon-o' as a subvolume. This
-defines the _translator tree_, about which more will be said in the
-next chapter. The two options specify that the TCP protocol is to be
-used (as opposed to InfiniBand, for example), and that access to the
-storage volume is to be provided to clients with any IP address at all.
-If you wanted to restrict access to this server to only your subnet for
-example, you'd specify something like `192.168.1.*' in the second
-option line.
-
- On the client machine, create the following text file (again, we'll
-assume the path to be `/tmp/glusterfs-client.vol'). Replace
-_server-ip-address_ with the IP address of your server machine. If you
-are doing all this on a single machine, use `127.0.0.1'.
-
- volume client
- type protocol/client
- option transport-type tcp
- option remote-host _server-ip-address_
- option remote-subvolume colon-o
- end-volume
-
- Now we need to start both the server and client programs. To start
-the server:
-
-     [root@server]# glusterfsd -f /tmp/glusterfsd.vol
-
- To start the client:
-
- [root@client]# glusterfs -f /tmp/glusterfs-client.vol /mnt/glusterfs
-
- You should now be able to see the files under the server's `/export'
-directory in the `/mnt/glusterfs' directory on the client. That's it;
-GlusterFS is now working as a network file system.
-
-
-File: user-guide.info, Node: Concepts, Next: Translators, Prev: Installation and Invocation, Up: Top
-
-3 Concepts
-**********
-
-* Menu:
-
-* Filesystems in Userspace::
-* Translator::
-* Volume specification file::
-
-
-File: user-guide.info, Node: Filesystems in Userspace, Next: Translator, Up: Concepts
-
-3.1 Filesystems in Userspace
-============================
-
-A filesystem is usually implemented in kernel space. Kernel space
-development is much harder than userspace development. FUSE is a kernel
-module/library that allows us to write a filesystem completely in
-userspace.
-
- FUSE consists of a kernel module which interacts with the userspace
-implementation using a device file `/dev/fuse'. When a process makes a
-syscall on a FUSE filesystem, VFS hands the request to the FUSE module,
-which writes the request to `/dev/fuse'. The userspace implementation
-polls `/dev/fuse', and when a request arrives, processes it and writes
-the result back to `/dev/fuse'. The kernel then reads from the device
-file and returns the result to the user process.
-
- In case of GlusterFS, the userspace program is the GlusterFS client.
-The control flow is shown in the diagram below. The GlusterFS client
-services the request by sending it to the server, which in turn hands
-it to the local POSIX filesystem.
-
-
- Fig 1. Control flow in GlusterFS
-
-
-File: user-guide.info, Node: Translator, Next: Volume specification file, Prev: Filesystems in Userspace, Up: Concepts
-
-3.2 Translator
-==============
-
-The _translator_ is the most important concept in GlusterFS. In fact,
-GlusterFS is nothing but a collection of translators working together,
-forming a translator _tree_.
-
- The idea of a translator is perhaps best understood using an
-analogy. Consider the VFS in the Linux kernel. The VFS abstracts the
-various filesystem implementations (such as EXT3, ReiserFS, XFS, etc.)
-supported by the kernel. When an application calls the kernel to
-perform an operation on a file, the kernel passes the request on to the
-appropriate filesystem implementation.
-
- For example, let's say there are two partitions on a Linux machine:
-`/', which is an EXT3 partition, and `/usr', which is a ReiserFS
-partition. Now if an application wants to open a file called, say,
-`/etc/fstab', then the kernel will internally pass the request to the
-EXT3 implementation. If on the other hand, an application wants to
-read a file called `/usr/src/linux/CREDITS', then the kernel will call
-upon the ReiserFS implementation to do the job.
-
- The "filesystem implementation" objects are analogous to GlusterFS
-translators. A GlusterFS translator implements all the filesystem
-operations. Whereas in VFS there is a two-level tree (with the kernel
-at the root and all the filesystem implementation as its children), in
-GlusterFS there exists a more elaborate tree structure.
-
- We can now define translators more precisely. A GlusterFS translator
-is a shared object (`.so') that implements every filesystem call.
-GlusterFS translators can be arranged in an arbitrary tree structure
-(subject to constraints imposed by the translators). When GlusterFS
-receives a filesystem call, it passes it on to the translator at the
-root of the translator tree. The root translator may in turn pass it on
-to any or all of its children, and so on, until the leaf nodes are
-reached. The result of a filesystem call is communicated in the reverse
-fashion, from the leaf nodes up to the root node, and then on to the
-application.
-
- So what might a translator tree look like?
-
-
- Fig 2. A sample translator tree
-
- The diagram depicts three servers and one GlusterFS client. It is
-important to note that conceptually, the translator tree spans machine
-boundaries. Thus, the client machine in the diagram, `10.0.0.1', can
-access the aggregated storage of the filesystems on the server machines
-`10.0.0.2', `10.0.0.3', and `10.0.0.4'. The translator diagram will
-make more sense once you've read the next chapter and understood the
-functions of the various translators.
-
-
-File: user-guide.info, Node: Volume specification file, Prev: Translator, Up: Concepts
-
-3.3 Volume specification file
-=============================
-
-The volume specification file describes the translator tree for both the
-server and client programs.
-
- A volume specification file is a sequence of volume definitions.
-The syntax of a volume definition is explained below:
-
- *volume* _volume-name_
- *type* _translator-name_
- *option* _option-name_ _option-value_
- ...
- *subvolumes* _subvolume1_ _subvolume2_ ...
- *end-volume*
-
- ...
-
-_volume-name_
- An identifier for the volume. This is just a human-readable name,
- and can contain any alphanumeric character. For instance,
- "storage-1", "colon-o", or "forty-two".
-
-_translator-name_
- Name of one of the available translators. Example:
- `protocol/client', `cluster/unify'.
-
-_option-name_
- Name of a valid option for the translator.
-
-_option-value_
- Value for the option. Everything following the "option" keyword to
- the end of the line is considered the value; it is up to the
- translator to parse it.
-
-_subvolume1_, _subvolume2_, ...
- Volume names of sub-volumes. The sub-volumes must already have
- been defined earlier in the file.
-
- There are a few rules you must follow when writing a volume
-specification file:
-
- * Everything following a ``#'' is considered a comment and is
- ignored. Blank lines are also ignored.
-
- * All names and keywords are case-sensitive.
-
- * The order of options inside a volume definition does not matter.
-
- * An option value may not span multiple lines.
-
- * If an option is not specified, it will assume its default value.
-
- * A sub-volume must have already been defined before it can be
- referenced. This means you have to write the specification file
- "bottom-up", starting from the leaf nodes of the translator tree
- and moving up to the root.
-
- A simple example volume specification file is shown below:
-
- # This is a comment line
- volume client
- type protocol/client
- option transport-type tcp
- option remote-host localhost # Also a comment
- option remote-subvolume brick
- # The subvolumes line may be absent
- end-volume
-
- volume iot
- type performance/io-threads
- option thread-count 4
- subvolumes client
- end-volume
-
- volume wb
- type performance/write-behind
- subvolumes iot
- end-volume
-
-
-File: user-guide.info, Node: Translators, Next: Usage Scenarios, Prev: Concepts, Up: Top
-
-4 Translators
-*************
-
-* Menu:
-
-* Storage Translators::
-* Client and Server Translators::
-* Clustering Translators::
-* Performance Translators::
-* Features Translators::
-* Miscellaneous Translators::
-
- This chapter documents all the available GlusterFS translators in
-detail. Each translator section will show its name (for example,
-`cluster/unify'), briefly describe its purpose and workings, and list
-every option accepted by that translator and their meaning.
-
-
-File: user-guide.info, Node: Storage Translators, Next: Client and Server Translators, Up: Translators
-
-4.1 Storage Translators
-=======================
-
-The storage translators form the "backend" for GlusterFS. Currently,
-the only available storage translator is the POSIX translator, which
-stores files on a normal POSIX filesystem. A pleasant consequence of
-this is that your data will still be accessible if GlusterFS crashes or
-cannot be started.
-
- Other storage backends are planned for the future. One of the
-possibilities is an Amazon S3 translator. Amazon S3 is an unlimited
-online storage service accessible through a web services API. The S3
-translator will allow you to access the storage as a normal POSIX
-filesystem. (1)
-
-* Menu:
-
-* POSIX::
-* BDB::
-
- ---------- Footnotes ----------
-
- (1) Some more discussion about this can be found at:
-
-http://developer.amazonwebservices.com/connect/message.jspa?messageID=52873
-
-
-File: user-guide.info, Node: POSIX, Next: BDB, Up: Storage Translators
-
-4.1.1 POSIX
------------
-
- type storage/posix
-
- The `posix' translator uses a normal POSIX filesystem as its
-"backend" to actually store files and directories. This can be any
-filesystem that supports extended attributes (EXT3, ReiserFS, XFS,
-...). Extended attributes are used by some translators to store
-metadata, for example, by the replicate and stripe translators. See
-*note Replicate:: and *note Stripe::, respectively for details.
-
-`directory <path>'
- The directory on the local filesystem which is to be used for
- storage.
-
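- A minimal sketch of a posix storage volume (the volume name and export
-path are illustrative):
-
-     volume brick
-       type storage/posix
-       option directory /export/brick   # local directory used for storage
-     end-volume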
-
-File: user-guide.info, Node: BDB, Prev: POSIX, Up: Storage Translators
-
-4.1.2 BDB
----------
-
- type storage/bdb
-
- The `BDB' translator uses a Berkeley DB database as its "backend" to
-actually store files as key-value pair in the database and directories
-as regular POSIX directories. Note that BDB does not provide extended
-attribute support for regular files. Do not use BDB as storage
-translator while using any translator that demands extended attributes
-on "backend".
-
-`directory <path>'
- The directory on the local filesystem which is to be used for
- storage.
-
-`mode [cache|persistent] (cache)'
- When BDB is run in `cache' mode, recovery of back-end is not
- completely guaranteed. `persistent' guarantees that BDB can
- recover back-end from Berkeley DB even if GlusterFS crashes.
-
-`errfile <path>'
- The path of the file to be used as `errfile' for Berkeley DB to
- report detailed error messages, if any. Note that all the contents
- of this file will be written by Berkeley DB, not GlusterFS.
-
-`logdir <path>'
-
-
-File: user-guide.info, Node: Client and Server Translators, Next: Clustering Translators, Prev: Storage Translators, Up: Translators
-
-4.2 Client and Server Translators
-=================================
-
-The client and server translators enable GlusterFS to export a
-translator tree over the network or access a remote GlusterFS server.
-These two translators implement GlusterFS's network protocol.
-
-* Menu:
-
-* Transport modules::
-* Client protocol::
-* Server protocol::
-
-
-File: user-guide.info, Node: Transport modules, Next: Client protocol, Up: Client and Server Translators
-
-4.2.1 Transport modules
------------------------
-
-The client and server translators are capable of using any of the
-pluggable transport modules. Currently available transport modules are
-`tcp', which uses a TCP connection between client and server to
-communicate; `ib-sdp', which uses a TCP connection over InfiniBand, and
-`ibverbs', which uses high-speed InfiniBand connections.
-
- Each transport module comes in two different versions, one to be
-used on the server side and the other on the client side.
-
-4.2.1.1 TCP
-...........
-
-The TCP transport module uses a TCP/IP connection between the server
-and the client.
-
- option transport-type tcp
-
- The TCP client module accepts the following options:
-
-`non-blocking-connect [no|off|on|yes] (on)'
- Whether to make the connection attempt asynchronous.
-
-`remote-port <n> (24007)'
- Server port to connect to.
-
-`remote-host <hostname> *'
- Hostname or IP address of the server. If the host name resolves to
- multiple IP addresses, all of them will be tried in a round-robin
- fashion. This feature can be used to implement fail-over.
-
- The TCP server module accepts the following options:
-
-`bind-address <address> (0.0.0.0)'
- The local interface on which the server should listen to requests.
- Default is to listen on all interfaces.
-
-`listen-port <n> (24007)'
- The local port to listen on.
-
-4.2.1.2 IB-SDP
-..............
-
- option transport-type ib-sdp
-
-     The kernel implements a socket interface for InfiniBand hardware; SDP
-runs over ib-verbs. This module accepts the same options as `tcp'.
-
-4.2.1.3 ibverbs
-...............
-
-     option transport-type ib-verbs
-
- InfiniBand is a scalable switched fabric interconnect mechanism
-primarily used in high-performance computing. InfiniBand can deliver
-data throughput of the order of 10 Gbit/s, with microsecond latencies.
-
- The `ib-verbs' transport accesses the InfiniBand hardware through
-the "verbs" API, which is the lowest level of software access possible
-and which gives the highest performance. On InfiniBand hardware, it is
-always best to use `ib-verbs'. Use `ib-sdp' only if you cannot get
-`ib-verbs' working for some reason.
-
- The `ib-verbs' client module accepts the following options:
-
-`non-blocking-connect [no|off|on|yes] (on)'
- Whether to make the connection attempt asynchronous.
-
-`remote-port <n> (24007)'
- Server port to connect to.
-
-`remote-host <hostname> *'
- Hostname or IP address of the server. If the host name resolves to
- multiple IP addresses, all of them will be tried in a round-robin
- fashion. This feature can be used to implement fail-over.
-
- The `ib-verbs' server module accepts the following options:
-
-`bind-address <address> (0.0.0.0)'
- The local interface on which the server should listen to requests.
- Default is to listen on all interfaces.
-
-`listen-port <n> (24007)'
- The local port to listen on.
-
- The following options are common to both the client and server
-modules:
-
-     If you are familiar with InfiniBand jargon, the mode used by
-GlusterFS is "reliable connection-oriented channel transfer".
-
-`ib-verbs-work-request-send-count <n> (64)'
- Length of the send queue in datagrams. [Reason to
- increase/decrease?]
-
-`ib-verbs-work-request-recv-count <n> (64)'
- Length of the receive queue in datagrams. [Reason to
- increase/decrease?]
-
-`ib-verbs-work-request-send-size <size> (128KB)'
- Size of each datagram that is sent. [Reason to increase/decrease?]
-
-`ib-verbs-work-request-recv-size <size> (128KB)'
- Size of each datagram that is received. [Reason to
- increase/decrease?]
-
-`ib-verbs-port <n> (1)'
- Port number for ib-verbs.
-
-`ib-verbs-mtu [256|512|1024|2048|4096] (2048)'
- The Maximum Transmission Unit [Reason to increase/decrease?]
-
-`ib-verbs-device-name <device-name> (first device in the list)'
- InfiniBand device to be used.
-
- For maximum performance, you should ensure that the send/receive
-counts on both the client and server are the same.
-
- ib-verbs is preferred over ib-sdp.
-
-
-File: user-guide.info, Node: Client protocol, Next: Server protocol, Prev: Transport modules, Up: Client and Server Translators
-
-4.2.2 Client
-------------
-
-     type protocol/client
-
- The client translator enables the GlusterFS client to access a
-remote server's translator tree.
-
-`transport-type [tcp,ib-sdp,ib-verbs] (tcp)'
- The transport type to use. You should use the client versions of
- all the transport modules (`tcp', `ib-sdp', `ib-verbs').
-
-`remote-subvolume <volume_name> *'
- The name of the volume on the remote host to attach to. Note that
- this is _not_ the name of the `protocol/server' volume on the
- server. It should be any volume under the server.
-
-`transport-timeout <n> (120 seconds)'
- Inactivity timeout. If a reply is expected and no activity takes
- place on the connection within this time, the transport connection
- will be broken, and a new connection will be attempted.
-
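- A minimal sketch of a client volume using these options (the host address
-and volume names are illustrative):
-
-     volume remote1
-       type protocol/client
-       option transport-type tcp
-       option remote-host 10.0.0.2        # address of the server
-       option remote-subvolume brick      # a volume defined on the server
-     end-volume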
-
-File: user-guide.info, Node: Server protocol, Prev: Client protocol, Up: Client and Server Translators
-
-4.2.3 Server
-------------
-
- type protocol/server
-
- The server translator exports a translator tree and makes it
-accessible to remote GlusterFS clients.
-
-`client-volume-filename <path> (<CONFDIR>/glusterfs-client.vol)'
- The volume specification file to use for the client. This is the
- file the client will receive when it is invoked with the
-     `--volfile-server' option (*note Client::).
-
-`transport-type [tcp,ib-verbs,ib-sdp] (tcp)'
- The transport to use. You should use the server versions of all
- the transport modules (`tcp', `ib-sdp', `ib-verbs').
-
-`auth.addr.<volume name>.allow <IP address wildcard pattern>'
- IP addresses of the clients that are allowed to attach to the
- specified volume. This can be a wildcard. For example, a wildcard
- of the form `192.168.*.*' allows any host in the `192.168.x.x'
- subnet to connect to the server.
-
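- A minimal sketch of a server volume using these options (the volume names
-and address pattern are illustrative):
-
-     volume server
-       type protocol/server
-       option transport-type tcp
-       subvolumes brick
-       option auth.addr.brick.allow 192.168.*.*   # clients allowed to attach to 'brick'
-     end-volume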
-
-
-File: user-guide.info, Node: Clustering Translators, Next: Performance Translators, Prev: Client and Server Translators, Up: Translators
-
-4.3 Clustering Translators
-==========================
-
-The clustering translators are the most important GlusterFS
-translators, since it is these that make GlusterFS a cluster
-filesystem. These translators together enable GlusterFS to access an
-arbitrarily large amount of storage, and provide RAID-like redundancy
-and distribution over the entire cluster.
-
- There are three clustering translators: *unify*, *replicate*, and
-*stripe*. The unify translator aggregates storage from many server
-nodes. The replicate translator provides file replication. The stripe
-translator allows a file to be spread across many server nodes. The
-following sections look at each of these translators in detail.
-
-* Menu:
-
-* Unify::
-* Replicate::
-* Stripe::
-
-
-File: user-guide.info, Node: Unify, Next: Replicate, Up: Clustering Translators
-
-4.3.1 Unify
------------
-
- type cluster/unify
-
- The unify translator presents a `unified' view of all its
-sub-volumes. That is, it makes the union of all its sub-volumes appear
-as a single volume. It is the unify translator that gives GlusterFS the
-ability to access an arbitrarily large amount of storage.
-
- For unify to work correctly, certain invariants need to be
-maintained across the entire network. These are:
-
- * The directory structure of all the sub-volumes must be identical.
-
- * A particular file can exist on only one of the sub-volumes.
- Phrasing it in another way, a pathname such as
-     `/home/calvin/homework.txt' is unique across the entire cluster.
-
-
-
-Looking at the second requirement, you might wonder how one can
-accomplish storing redundant copies of a file, if no file can exist
-multiple times. To answer, we must remember that these invariants are
-from _unify's perspective_. A translator such as replicate at a lower
-level in the translator tree than unify may subvert this picture.
-
- The first invariant might seem quite tedious to ensure. We shall see
-later that this is not so, since unify's _self-heal_ mechanism takes
-care of maintaining it.
-
- The second invariant implies that unify needs some way to decide
-which file goes where. Unify makes use of _scheduler_ modules for this
-purpose.
-
- When a file needs to be created, unify's scheduler decides upon the
-sub-volume to be used to store the file. There are many schedulers
-available, each using a different algorithm and suitable for different
-purposes.
-
- The various schedulers are described in detail in the sections that
-follow.
-
-4.3.1.1 ALU
-...........
-
- option scheduler alu
-
- ALU stands for "Adaptive Least Usage". It is the most advanced
-scheduler available in GlusterFS. It balances the load across volumes
-taking several factors into account. It adapts itself to changing I/O
-patterns according to its configuration. When properly configured, it
-can eliminate the need for regular tuning of the filesystem to keep
-volume load nicely balanced.
-
- The ALU scheduler is composed of multiple least-usage
-sub-schedulers. Each sub-scheduler keeps track of a certain type of
-load, for each of the sub-volumes, getting statistics from the
-sub-volumes themselves. The sub-schedulers are these:
-
- * disk-usage: The used and free disk space on the volume.
-
- * read-usage: The amount of reading done from this volume.
-
- * write-usage: The amount of writing done to this volume.
-
- * open-files-usage: The number of files currently open from this
- volume.
-
- * disk-speed-usage: The speed at which the disks are spinning. This
- is a constant value and therefore not very useful.
-
- The ALU scheduler needs to know which of these sub-schedulers to use,
-and in which order to evaluate them. This is done through the `option
-alu.order' configuration directive.
-
- Each sub-scheduler needs to know two things: when to kick in (the
-entry-threshold), and how long to stay in control (the exit-threshold).
-For example: when unifying three disks of 100GB, keeping an exact
-balance of disk-usage is not necessary. Instead, there could be a 1GB
-margin, which can be used to nicely balance other factors, such as
-read-usage. The disk-usage scheduler can be told to kick in only when a
-certain threshold of discrepancy is passed, such as 1GB. When it
-assumes control under this condition, it will write all subsequent data
-to the least-used volume. If it is doing so, it is unwise to stop right
-after the values are below the entry-threshold again, since that would
-make it very likely that the situation will occur again very soon. Such
-a situation would cause the ALU to spend most of its time disk-usage
-scheduling, which is unfair to the other sub-schedulers. The
-exit-threshold therefore defines the amount of data that needs to be
-written to the least-used disk, before control is relinquished again.
-
- In addition to the sub-schedulers, the ALU scheduler also has
-"limits" options. These can stop the creation of new files on a volume
-once values drop below a certain threshold. For example, setting
-`option alu.limits.min-free-disk 5GB' will stop the scheduling of files
-to volumes that have less than 5GB of free disk space, leaving the
-files on that disk some room to grow.
-
- The actual values you assign to the thresholds for sub-schedulers and
-limits depend on your situation. If you have fast-growing files, you'll
-want to stop file-creation on a disk much earlier than when hardly any
-of your files are growing. If you care less about disk-usage balance
-than about read-usage balance, you'll want a bigger disk-usage
-scheduler entry-threshold and a smaller read-usage scheduler
-entry-threshold.
-
- For thresholds defining a size, values specifying "KB", "MB" and "GB"
-are allowed. For example: `option alu.limits.min-free-disk 5GB'.
-
-`alu.order <order> * ("disk-usage:write-usage:read-usage:open-files-usage:disk-speed")'
-
-`alu.disk-usage.entry-threshold <size> (1GB)'
-
-`alu.disk-usage.exit-threshold <size> (512MB)'
-
-`alu.write-usage.entry-threshold <%> (25)'
-
-`alu.write-usage.exit-threshold <%> (5)'
-
-`alu.read-usage.entry-threshold <%> (25)'
-
-`alu.read-usage.exit-threshold <%> (5)'
-
-`alu.open-files-usage.entry-threshold <n> (1000)'
-
-`alu.open-files-usage.exit-threshold <n> (100)'
-
-`alu.limits.min-free-disk <%>'
-
-`alu.limits.max-open-files <n>'
-
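- For illustration, a sketch of a unify volume using the ALU scheduler (the
-volume names and threshold values are examples, not recommendations):
-
-     volume unified
-       type cluster/unify
-       subvolumes brick1 brick2 brick3
-       option namespace brick-ns              # see the Namespace section below
-       option scheduler alu
-       option alu.order disk-usage:read-usage
-       option alu.limits.min-free-disk 5GB
-       option alu.disk-usage.entry-threshold 1GB
-       option alu.disk-usage.exit-threshold 512MB
-     end-volume
-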
-4.3.1.2 Round Robin (RR)
-........................
-
- option scheduler rr
-
- The round-robin (RR) scheduler creates files in a round-robin
-fashion. Each client has its own round-robin loop. When your files are
-mostly similar in size and I/O access pattern, this scheduler is a good
-choice. The RR scheduler checks for free disk space on the server
-before scheduling, which also tells you when it is time to add another
-server node. The default value of min-free-disk is 5%; it is checked on
-file creation calls, with at least 10 seconds (by default) elapsing
-between two checks.
-
- Options:
-`rr.limits.min-free-disk <%> (5)'
- Minimum free disk space a node must have for RR to schedule a file
- to it.
-
-`rr.refresh-interval <t> (10 seconds)'
- Time between two successive free disk space checks.
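-
- A minimal unify volume using the RR scheduler might look like this
-(the brick and namespace volume names and the limit value are
-illustrative):
-
- volume unify-rr
-   type cluster/unify
-   option namespace brick-ns
-   option scheduler rr
-   option rr.limits.min-free-disk 10      # percent; illustrative
-   subvolumes brick1 brick2
- end-volume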
-
-4.3.1.3 Random
-..............
-
- option scheduler random
-
- The random scheduler schedules file creation randomly among its
-child nodes. Like the round-robin scheduler, it also checks for a
-minimum amount of free disk space before scheduling a file to a node.
-
-`random.limits.min-free-disk <%> (5)'
- Minimum free disk space a node must have for random to schedule a
- file to it.
-
-`random.refresh-interval <t> (10 seconds)'
- Time between two successive free disk space checks.
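-
- The random scheduler is configured the same way as RR; a minimal
-sketch (names and values are illustrative):
-
- volume unify-random
-   type cluster/unify
-   option namespace brick-ns
-   option scheduler random
-   option random.limits.min-free-disk 10  # percent; illustrative
-   subvolumes brick1 brick2
- end-volume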
-
-4.3.1.4 NUFA
-............
-
- option scheduler nufa
-
- It is common in many GlusterFS computing environments for all
-deployed machines to act as both servers and clients. For example, a
-research lab may have 40 workstations each with its own storage. All of
-these workstations might act as servers exporting a volume as well as
-clients accessing the entire cluster's storage. In such a situation,
-it makes sense to store locally created files on the local workstation
-itself (assuming files are accessed most by the workstation that
-created them). The Non-Uniform File Allocation (NUFA) scheduler
-accomplishes that.
-
- NUFA gives the local system first priority for file creation over
-other nodes. If the local volume does not have more free disk space
-than a specified amount (5% by default) then NUFA schedules files among
-the other child volumes in a round-robin fashion.
-
- NUFA is named after the similar strategy used for memory access,
-NUMA(1).
-
-`nufa.limits.min-free-disk <%> (5)'
- Minimum disk space that must be free (local or remote) for NUFA to
- schedule a file to it.
-
-`nufa.refresh-interval <t> (10 seconds)'
- Time between two successive free disk space checks.
-
-`nufa.local-volume-name <volume>'
- The name of the volume corresponding to the local system. This
- volume must be one of the children of the unify volume. This
- option is mandatory.
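-
- A minimal sketch of a unify volume using NUFA (here the local brick
-is assumed to be named brick-local; all names and values are
-illustrative):
-
- volume unify-nufa
-   type cluster/unify
-   option namespace brick-ns
-   option scheduler nufa
-   option nufa.local-volume-name brick-local
-   option nufa.limits.min-free-disk 10    # percent; illustrative
-   subvolumes brick-local brick-remote-1 brick-remote-2
- end-volume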
-
-4.3.1.5 Namespace
-.................
-
-A namespace volume is needed for two reasons: it provides persistent
-inode numbers, and it ensures a file continues to appear to exist even
-when the node holding its data is down.
-
- Namespace entries are simply touched (created as empty files); the
-namespace is consulted on every lookup.
-
-`namespace <volume> *'
- Name of the namespace volume (which should be one of the unify
- volume's children).
-
-`self-heal [on|off] (on)'
- Enable/disable self-heal. Unless you know what you are doing, do
- not disable self-heal.
-
-4.3.1.6 Self Heal
-.................
-
-* When a 'lookup()/stat()' call is made on a directory for the first
-time, a self-heal call is made, which checks the consistency of its
-child nodes. If an entry is present on a storage node but not in the
-namespace, that entry is created in the namespace, and vice versa. A
-writedir() API was introduced for this purpose. Self-heal also checks
-for permission and uid/gid consistency.
-
- * This check is also done when a server goes down and comes back up.
-
- * If you start with an empty namespace export but have data on the
-storage nodes, running 'find . >/dev/null' or 'ls -lR >/dev/null' on
-the mount point will build the namespace in one shot. Otherwise, the
-namespace is built on demand when a file is looked up for the first
-time.
-
- NOTE: Kernel 'Oops' messages have been seen with fuse-2.6.3 when the
-namespace is deleted on the backend while glusterfs is running. This
-issue is not present with fuse-2.6.5.
-
- ---------- Footnotes ----------
-
- (1) Non-Uniform Memory Access:
-<http://en.wikipedia.org/wiki/Non-Uniform_Memory_Access>
-
-
-File: user-guide.info, Node: Replicate, Next: Stripe, Prev: Unify, Up: Clustering Translators
-
-4.3.2 Replicate (formerly AFR)
-------------------------------
-
- type cluster/replicate
-
- Replicate provides RAID-1-like functionality for GlusterFS.
-Replicate replicates files and directories across its subvolumes; if
-Replicate has four subvolumes, there will be four copies of all files
-and directories. Replicate provides high availability: if one of the
-subvolumes goes down (e.g., due to a server crash or network
-disconnection), Replicate will still service requests using the
-redundant copies.
-
- Replicate also provides self-heal functionality, i.e., in case the
-crashed servers come up, the outdated files and directories will be
-updated with the latest versions. Replicate uses extended attributes of
-the backend file system to track the versioning of files and
-directories and provide the self-heal feature.
-
- volume replicate-example
- type cluster/replicate
- subvolumes brick1 brick2 brick3
- end-volume
-
- This sample configuration will replicate all directories and files on
-brick1, brick2 and brick3.
-
- All read operations are served from the first alive child. If all
-three sub-volumes are up, reads are done from brick1; if brick1 is
-down, reads are done from brick2. If a read() was in progress on brick1
-when it went down, replicate transparently falls back to brick2.
-
- The next release of GlusterFS will add the following features:
- * Ability to specify the sub-volume from which read operations are
- to be done (this will help users who have one of the sub-volumes
- as a local storage volume).
-
- * Allow scheduling of read operations amongst the sub-volumes in a
- round-robin fashion.
-
- The order of the subvolumes list should be the same across all
-'replicate' volumes, as it is used for locking purposes.
-
-4.3.2.1 Self Heal
-.................
-
-Replicate has self-heal feature, which updates the outdated file and
-directory copies by the most recent versions. For example consider the
-following config:
-
- volume replicate-example
- type cluster/replicate
- subvolumes brick1 brick2
- end-volume
-
-4.3.2.2 File self-heal
-......................
-
-Now if we create a file foo.txt on replicate-example, the file will be
-created on brick1 and brick2. The file will have two extended
-attributes associated with it in the backend filesystem:
-trusted.afr.createtime and trusted.afr.version. The
-trusted.afr.createtime xattr holds the creation time (in seconds since
-the epoch), and trusted.afr.version is a number that is incremented
-each time the file is modified. The increment happens during close (in
-case any write was done before the close).
-
- Suppose brick1 goes down and we edit foo.txt; the version on brick2
-gets incremented. When brick1 comes back up and we open() foo.txt,
-replicate checks whether the versions on the two bricks are the same.
-If they are not, the outdated copy is replaced by the latest copy and
-its version is updated. After the sync, the open() proceeds in the
-usual manner and the application calling open() can continue accessing
-the file.
-
- Now suppose brick1 goes down, we delete foo.txt, and then create a
-new file with the same name. When brick1 comes back up, the version on
-brick1 may well be higher than the version on brick2; this is where the
-createtime extended attribute helps in deciding which copy is outdated.
-Hence both createtime and version must be considered to decide on the
-latest copy.
-
- The version attribute is incremented during the close() call; it is
-not incremented if no write() was done. If the fd being closed was
-obtained through a create() call, the createtime extended attribute is
-also set.
-
-4.3.2.3 Directory self-heal
-...........................
-
-Suppose brick1 goes down, we delete foo.txt, and then brick1 comes back
-up. We should not re-create foo.txt on brick2; instead we should delete
-foo.txt on brick1. This situation is handled by keeping createtime and
-version attributes on the directory, just as for files. When lookup()
-is done on the directory, the createtime/version attributes of the
-copies are compared to determine which files need to be deleted; those
-files are deleted and the extended attributes of the outdated directory
-copy are updated. Each time a directory is modified (a file or a
-subdirectory is created or deleted inside it) while one of the subvols
-is down, the directory's version is incremented.
-
- lookup() is a call initiated by the kernel on a file or directory
-just before any access to that file or directory. In glusterfs, by
-default, lookup() will not be called in case it was called in the past
-one second on that particular file or directory.
-
- The extended attributes can be seen in the backend filesystem using
-the `getfattr' command. (`getfattr -n trusted.afr.version <file>')
-
-`debug [on|off] (off)'
-
-`self-heal [on|off] (on)'
-
-`replicate <pattern> (*:1)'
-
-`lock-node <child_volume> (first child is used by default)'
-
-
-File: user-guide.info, Node: Stripe, Prev: Replicate, Up: Clustering Translators
-
-4.3.3 Stripe
-------------
-
- type cluster/stripe
-
- The stripe translator distributes the contents of a file over its
-sub-volumes. It does this by creating, on each of its sub-volumes, a
-file equal in size to the total size of the original file. It then
-writes only a part of the file to each sub-volume, leaving the rest of
-it empty. These empty regions are called `holes' in Unix terminology.
-The holes do not consume any disk space.
-
-You can configure stripe so that only filenames matching a pattern are
-striped. You can also configure the size of the data to be stored on
-each sub-volume.
-
-`block-size <pattern>:<size> (*:0 no striping)'
- Distribute files matching `<pattern>' over the sub-volumes,
- storing at least `<size>' on each sub-volume. For example,
-
- option block-size *.mpg:1M
-
- distributes all files ending in `.mpg', storing at least 1 MB on
- each sub-volume.
-
- Any number of `block-size' option lines may be present, specifying
- different sizes for different file name patterns.
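-
- Putting it together, a stripe volume that stripes only `.mpg' files
-in 1 MB blocks might be declared as follows (the sub-volume names are
-illustrative):
-
- volume stripe0
-   type cluster/stripe
-   option block-size *.mpg:1M
-   subvolumes brick1 brick2 brick3 brick4
- end-volume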
-
-
-File: user-guide.info, Node: Performance Translators, Next: Features Translators, Prev: Clustering Translators, Up: Translators
-
-4.4 Performance Translators
-===========================
-
-* Menu:
-
-* Read Ahead::
-* Write Behind::
-* IO Threads::
-* IO Cache::
-* Booster::
-
-
-File: user-guide.info, Node: Read Ahead, Next: Write Behind, Up: Performance Translators
-
-4.4.1 Read Ahead
-----------------
-
- type performance/read-ahead
-
- The read-ahead translator pre-fetches data in advance on every read.
-This benefits applications that mostly process files in sequential
-order, since the next block of data will already be available by the
-time the application is done with the current one.
-
- Additionally, the read-ahead translator also behaves as a
-read-aggregator. Many small read operations are combined and issued as
-fewer, larger read requests to the server.
-
- Read-ahead deals in "pages" as the unit of data fetched. The page
-size is configurable, as is the "page count", which is the number of
-pages that are pre-fetched.
-
- Read-ahead is most useful with InfiniBand (using the ib-verbs
-transport). On Fast Ethernet and Gigabit Ethernet networks, GlusterFS
-can achieve the link-maximum throughput even without read-ahead, making
-it quite superfluous.
-
- Note that read-ahead only happens if the reads are perfectly
-sequential. If your application accesses data in a random fashion,
-using read-ahead might actually lead to a performance loss, since
-read-ahead will pointlessly fetch pages which won't be used by the
-application.
-
- Options:
-`page-size <n> (256KB)'
- The unit of data that is pre-fetched.
-
-`page-count <n> (2)'
- The number of pages that are pre-fetched.
-
-`force-atime-update [on|off|yes|no] (off|no)'
- Whether to force an access time (atime) update on the file on
- every read. Without this, the atime will be slightly imprecise, as
- it will reflect the time when the read-ahead translator read the
- data, not when the application actually read it.
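-
- A typical client-side arrangement is to load read-ahead directly on
-top of the protocol/client volume. A minimal sketch (the child volume
-name and page count are illustrative):
-
- volume readahead0
-   type performance/read-ahead
-   option page-count 4        # pre-fetch 4 pages instead of the default 2
-   subvolumes client0
- end-volume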
-
-
-File: user-guide.info, Node: Write Behind, Next: IO Threads, Prev: Read Ahead, Up: Performance Translators
-
-4.4.2 Write Behind
-------------------
-
- type performance/write-behind
-
- The write-behind translator improves the latency of a write
-operation. It does this by relegating the write operation to the
-background and returning to the application even as the write is in
-progress. Using the write-behind translator, successive write requests
-can be pipelined. This mode of write-behind operation is best used on
-the client side, to enable decreased write latency for the application.
-
- The write-behind translator can also aggregate write requests. If the
-`aggregate-size' option is specified, then successive writes up to that
-size are accumulated and written in a single operation. This mode of
-operation is best used on the server side, as this will decrease the
-disk's head movement when multiple files are being written to in
-parallel.
-
- The `aggregate-size' option has a default value of 128KB. Although
-this works well for most users, you should always experiment with
-different values to determine the one that will deliver maximum
-performance. This is because the performance of write-behind depends on
-your interconnect, size of RAM, and the work load.
-
-`aggregate-size <n> (128KB)'
- Amount of data to accumulate before doing a write
-
-`flush-behind [on|yes|off|no] (off|no)'
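-
- A minimal sketch of a client-side write-behind volume (the child
-volume name and the aggregate size are illustrative):
-
- volume writebehind0
-   type performance/write-behind
-   option aggregate-size 1MB
-   subvolumes client0
- end-volume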
-
-
-File: user-guide.info, Node: IO Threads, Next: IO Cache, Prev: Write Behind, Up: Performance Translators
-
-4.4.3 IO Threads
-----------------
-
- type performance/io-threads
-
- The IO threads translator is intended to increase the responsiveness
-of the server to metadata operations by doing file I/O (read, write) in
-a background thread. Since the GlusterFS server is single-threaded,
-using the IO threads translator can significantly improve performance.
-This translator is best used on the server side, loaded just below the
-server protocol translator.
-
- IO threads operates by handing out read and write requests to a
-separate thread. The total number of threads in existence at a time is
-constant, and configurable.
-
-`thread-count <n> (1)'
- Number of threads to use.
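-
- A minimal sketch of a server-side io-threads volume, loaded just
-below the protocol/server translator (the child volume name and thread
-count are illustrative):
-
- volume iothreads0
-   type performance/io-threads
-   option thread-count 4
-   subvolumes brick
- end-volume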
-
-
-File: user-guide.info, Node: IO Cache, Next: Booster, Prev: IO Threads, Up: Performance Translators
-
-4.4.4 IO Cache
---------------
-
- type performance/io-cache
-
- The IO cache translator caches data that has been read. This is
-useful if many applications read the same data multiple times, and if
-reads are much more frequent than writes (for example, IO caching may be
-useful in a web hosting environment, where most clients will simply
-read some files and only a few will write to them).
-
- The IO cache translator reads data from its child in `page-size'
-chunks. It caches data up to `cache-size' bytes. The cache is
-maintained as a prioritized least-recently-used (LRU) list, with
-priorities determined by user-specified patterns to match filenames.
-
- When the IO cache translator detects a write operation, the cache
-for that file is flushed.
-
- The IO cache translator periodically verifies the consistency of
-cached data, using the modification times on the files. The
-verification timeout is configurable.
-
-`page-size <n> (128KB)'
- Size of a page.
-
-`cache-size <n> (32MB)'
- Total amount of data to be cached.
-
-`force-revalidate-timeout <n> (1)'
- Timeout to force a cache consistency verification, in seconds.
-
-`priority <pattern> (*:0)'
- Filename patterns listed in order of priority.
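-
- A minimal sketch of an io-cache volume (the child volume name, cache
-size and priority pattern are illustrative):
-
- volume iocache0
-   type performance/io-cache
-   option cache-size 64MB
-   option priority *.html:2
-   subvolumes client0
- end-volume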
-
-
-File: user-guide.info, Node: Booster, Prev: IO Cache, Up: Performance Translators
-
-4.4.5 Booster
--------------
-
- type performance/booster
-
- The booster translator gives applications a faster path to
-communicate read and write requests to GlusterFS. Normally, all
-requests to GlusterFS from applications go through FUSE, as indicated
-in *note Filesystems in Userspace::. Using the booster translator in
-conjunction with the GlusterFS booster shared library, an application
-can bypass the FUSE path and send read/write requests directly to the
-GlusterFS client process.
-
- The booster mechanism consists of two parts: the booster translator,
-and the booster shared library. The booster translator is meant to be
-loaded on the client side, usually at the root of the translator tree.
-The booster shared library should be `LD_PRELOAD'ed with the
-application.
-
- The booster translator when loaded opens a Unix domain socket and
-listens for read/write requests on it. The booster shared library
-intercepts read and write system calls and sends the requests to the
-GlusterFS process directly using the Unix domain socket, bypassing FUSE.
-This leads to superior performance.
-
- Once you've loaded the booster translator in your volume
-specification file, you can start your application as:
-
- $ LD_PRELOAD=/usr/local/bin/glusterfs-booster.so your_app
-
- The booster translator accepts no options.
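-
- A minimal sketch of a client-side spec with booster loaded at the
-root of the translator tree (the child volume name is illustrative):
-
- volume booster0
-   type performance/booster
-   subvolumes iocache0
- end-volume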
-
-
-File: user-guide.info, Node: Features Translators, Next: Miscellaneous Translators, Prev: Performance Translators, Up: Translators
-
-4.5 Features Translators
-========================
-
-* Menu:
-
-* POSIX Locks::
-* Fixed ID::
-
-
-File: user-guide.info, Node: POSIX Locks, Next: Fixed ID, Up: Features Translators
-
-4.5.1 POSIX Locks
------------------
-
- type features/posix-locks
-
- This translator provides storage independent POSIX record locking
-support (`fcntl' locking). Typically you'll want to load this on the
-server side, just above the POSIX storage translator. Using this
-translator you can get both advisory locking and mandatory locking
-support. It also handles `flock()' locks properly.
-
- Caveat: Consider a file that does not have its mandatory locking bits
-(+setgid, -group execution) turned on. Assume that this file is now
-opened by a process on a client that has the write-behind xlator
-loaded. The write-behind xlator does not cache anything for files which
-have mandatory locking enabled, to avoid incoherence. Let's say that
-mandatory locking is now enabled on this file through another client.
-The former client will not know about this change, and write-behind may
-erroneously report a write as being successful when in fact it would
-fail due to the region it is writing to being locked.
-
- There seems to be no easy way to fix this. To work around this
-problem, it is recommended that you never enable the mandatory bits on
-a file while it is open.
-
-`mandatory [on|off] (on)'
- Turns mandatory locking on.
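-
- A minimal sketch of a server-side spec loading posix-locks just above
-the POSIX storage translator (the directory path and volume names are
-illustrative):
-
- volume brick
-   type storage/posix
-   option directory /export/data
- end-volume
-
- volume locks
-   type features/posix-locks
-   option mandatory on
-   subvolumes brick
- end-volume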
-
-
-File: user-guide.info, Node: Fixed ID, Prev: POSIX Locks, Up: Features Translators
-
-4.5.2 Fixed ID
---------------
-
- type features/fixed-id
-
- The fixed ID translator makes all filesystem requests from the client
-appear to come from a fixed, specified UID/GID, regardless of which
-user actually initiated the request.
-
-`fixed-uid <n> [if not set, not used]'
- The UID to send to the server
-
-`fixed-gid <n> [if not set, not used]'
- The GID to send to the server
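-
- A minimal sketch (the UID/GID values and the child volume name are
-illustrative):
-
- volume fixed0
-   type features/fixed-id
-   option fixed-uid 1000
-   option fixed-gid 1000
-   subvolumes client0
- end-volume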
-
-
-File: user-guide.info, Node: Miscellaneous Translators, Prev: Features Translators, Up: Translators
-
-4.6 Miscellaneous Translators
-=============================
-
-* Menu:
-
-* ROT-13::
-* Trace::
-
-
-File: user-guide.info, Node: ROT-13, Next: Trace, Up: Miscellaneous Translators
-
-4.6.1 ROT-13
-------------
-
- type encryption/rot-13
-
- ROT-13 is a toy translator that can "encrypt" and "decrypt" file
-contents using the ROT-13 algorithm. ROT-13 is a trivial algorithm that
-rotates each letter of the alphabet by thirteen places. Thus, 'A'
-becomes 'N', 'B' becomes 'O', and 'Z' becomes 'M'.
-
- It goes without saying that you shouldn't use this translator if you
-need _real_ encryption (a future release of GlusterFS will have real
-encryption translators).
-
-`encrypt-write [on|off] (on)'
- Whether to encrypt on write
-
-`decrypt-read [on|off] (on)'
- Whether to decrypt on read
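-
- A minimal sketch (the child volume name is illustrative):
-
- volume rot13
-   type encryption/rot-13
-   option encrypt-write on
-   option decrypt-read on
-   subvolumes brick
- end-volume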
-
-
-File: user-guide.info, Node: Trace, Prev: ROT-13, Up: Miscellaneous Translators
-
-4.6.2 Trace
------------
-
- type debug/trace
-
- The trace translator is intended for debugging purposes. When
-loaded, it logs all the system calls received by the server or client
-(wherever trace is loaded), their arguments, and the results. You must
-use a GlusterFS log level of DEBUG (See *note Running GlusterFS::) for
-trace to work.
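-
- A trace volume is simply layered on top of the volume whose calls you
-want to log. A minimal sketch (the child volume name is illustrative):
-
- volume trace0
-   type debug/trace
-   subvolumes client0
- end-volume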
-
- Sample trace output (lines have been wrapped for readability):
- 2007-10-30 00:08:58 D [trace.c:1579:trace_opendir] trace: callid: 68
- (*this=0x8059e40, loc=0x8091984 {path=/iozone3_283, inode=0x8091f00},
- fd=0x8091d50)
-
- 2007-10-30 00:08:58 D [trace.c:630:trace_opendir_cbk] trace:
- (*this=0x8059e40, op_ret=4, op_errno=1, fd=0x8091d50)
-
- 2007-10-30 00:08:58 D [trace.c:1602:trace_readdir] trace: callid: 69
- (*this=0x8059e40, size=4096, offset=0 fd=0x8091d50)
-
- 2007-10-30 00:08:58 D [trace.c:215:trace_readdir_cbk] trace:
- (*this=0x8059e40, op_ret=0, op_errno=0, count=4)
-
- 2007-10-30 00:08:58 D [trace.c:1624:trace_closedir] trace: callid: 71
- (*this=0x8059e40, *fd=0x8091d50)
-
- 2007-10-30 00:08:58 D [trace.c:809:trace_closedir_cbk] trace:
- (*this=0x8059e40, op_ret=0, op_errno=1)
-
-
-File: user-guide.info, Node: Usage Scenarios, Next: Troubleshooting, Prev: Translators, Up: Top
-
-5 Usage Scenarios
-*****************
-
-5.1 Advanced Striping
-=====================
-
-This section is based on the Advanced Striping tutorial written by
-Anand Avati on the GlusterFS wiki (1).
-
-5.1.1 Mixed Storage Requirements
---------------------------------
-
-There are two ways of scheduling I/O: at the file level (using the
-unify translator) and at the block level (using the stripe translator).
-Striped I/O is good for files that are potentially large and require
-high parallel throughput (for example, a single 400GB file being
-accessed by hundreds or thousands of systems simultaneously and
-randomly). For most cases, file-level scheduling works best.
-
- In the real world, it is often desirable to mix file-level and
-block-level scheduling on a single storage volume. Alternatively, users
-can have two separate volumes, and hence two mount points, but
-applications may demand a single storage system to host both.
-
- This document explains how to mix file level scheduling with stripe.
-
-5.1.2 Configuration Brief
--------------------------
-
-This setup demonstrates how to configure the unify translator with an
-appropriate I/O scheduler for file-level scheduling, and stripe for
-matching patterns only. This way, GlusterFS chooses the appropriate I/O
-profile and knows how to efficiently handle both types of data.
-
- A simple technique to achieve this effect is to create a stripe set
-of unify and stripe blocks, where unify is the first sub-volume. Files
-that do not match the stripe policy are passed on to the first (unify)
-sub-volume and in turn scheduled across the cluster using its
-file-level I/O scheduler.
-
-5.1.3 Preparing GlusterFS Environment
--------------------------------------
-
-Create the directories /export/for-namespace, /export/for-unify and
-/export/for-stripe on all the storage bricks.
-
- Place the following server and client volume spec file under
-/etc/glusterfs (or appropriate installed path) and replace the IP
-addresses / access control fields to match your environment.
-
- ## file: /etc/glusterfs/glusterfsd.vol
- volume posix-unify
- type storage/posix
- option directory /export/for-unify
- end-volume
-
- volume posix-stripe
- type storage/posix
- option directory /export/for-stripe
- end-volume
-
- volume posix-namespace
- type storage/posix
- option directory /export/for-namespace
- end-volume
-
- volume server
- type protocol/server
- option transport-type tcp
- option auth.addr.posix-unify.allow 192.168.1.*
- option auth.addr.posix-stripe.allow 192.168.1.*
- option auth.addr.posix-namespace.allow 192.168.1.*
- subvolumes posix-unify posix-stripe posix-namespace
- end-volume
-
- ## file: /etc/glusterfs/glusterfs.vol
- volume client-namespace
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.1
- option remote-subvolume posix-namespace
- end-volume
-
- volume client-unify-1
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.1
- option remote-subvolume posix-unify
- end-volume
-
- volume client-unify-2
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.2
- option remote-subvolume posix-unify
- end-volume
-
- volume client-unify-3
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.3
- option remote-subvolume posix-unify
- end-volume
-
- volume client-unify-4
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.4
- option remote-subvolume posix-unify
- end-volume
-
- volume client-stripe-1
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.1
- option remote-subvolume posix-stripe
- end-volume
-
- volume client-stripe-2
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.2
- option remote-subvolume posix-stripe
- end-volume
-
- volume client-stripe-3
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.3
- option remote-subvolume posix-stripe
- end-volume
-
- volume client-stripe-4
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.4
- option remote-subvolume posix-stripe
- end-volume
-
- volume unify
- type cluster/unify
- option namespace client-namespace
- option scheduler rr
- subvolumes client-unify-1 client-unify-2 client-unify-3 client-unify-4
- end-volume
-
- volume stripe
- type cluster/stripe
- option block-size *.img:2MB # All files ending with .img are striped with a 2MB stripe block size.
- subvolumes unify client-stripe-1 client-stripe-2 client-stripe-3 client-stripe-4
- end-volume
-
- Bring up the Storage
-
- Starting the GlusterFS server: if you have installed from a binary
-package, you can start the service through the init.d startup script.
-If not:
-
- [root@server]# glusterfsd
-
- Mounting GlusterFS Volumes:
-
- [root@client]# glusterfs -s [BRICK-IP-ADDRESS] /mnt/cluster
-
- Improving upon this Setup
-
- The InfiniBand Verbs RDMA transport is much faster than the TCP/IP
-GigE transport.
-
- Using performance translators such as read-ahead, write-behind,
-io-cache, io-threads and booster is recommended.
-
- Replace the round-robin (rr) scheduler with ALU to handle more
-dynamic storage environments.
-
- ---------- Footnotes ----------
-
- (1)
-http://gluster.org/docs/index.php/Mixing_Striped_and_Regular_Files
-
-
-File: user-guide.info, Node: Troubleshooting, Next: GNU Free Documentation Licence, Prev: Usage Scenarios, Up: Top
-
-6 Troubleshooting
-*****************
-
-This chapter is a general troubleshooting guide to GlusterFS. It lists
-common GlusterFS server and client error messages, debugging hints, and
-concludes with the suggested procedure to report bugs in GlusterFS.
-
-6.1 GlusterFS error messages
-============================
-
-6.1.1 Server errors
--------------------
-
- glusterfsd: FATAL: could not open specfile:
- '/etc/glusterfs/glusterfsd.vol'
-
- The GlusterFS server expects the volume specification file to be at
-`/etc/glusterfs/glusterfsd.vol'. The example specification file will be
-installed as `/etc/glusterfs/glusterfsd.vol.sample'. You need to edit
-it and rename it, or provide a different specification file using the
-`--spec-file' command line option (See *note Server::).
-
- gf_log_init: failed to open logfile "/usr/var/log/glusterfs/glusterfsd.log"
- (Permission denied)
-
- You don't have permission to create files in the
-`/usr/var/log/glusterfs' directory. Make sure you are running GlusterFS
-as root. Alternatively, specify a different path for the log file using
-the `--log-file' option (See *note Server::).
-
-6.1.2 Client errors
--------------------
-
- fusermount: failed to access mountpoint /mnt:
- Transport endpoint is not connected
-
- A previous failed (or hung) mount of GlusterFS is preventing it from
-being mounted again in the same location. The fix is to do:
-
- # umount /mnt
-
- and try mounting again.
-
- *"Transport endpoint is not connected".*
-
- If you get this error when you try a command such as `ls' or `cat',
-it means the GlusterFS mount did not succeed. Try running GlusterFS in
-`DEBUG' logging level and study the log messages to discover the cause.
-
- *"Connect to server failed", "SERVER-ADDRESS: Connection refused".*
-
- The GlusterFS server is not running or has died. Check your network
-connections and firewall settings. To check if the server is reachable,
-try:
-
- telnet IP-ADDRESS 24007
-
- If the server is accessible, your `telnet' command should connect and
-block. If not you will see an error message such as `telnet: Unable to
-connect to remote host: Connection refused'. 24007 is the default
-GlusterFS port. If you have changed it, then use the corresponding port
-instead.
-
- gf_log_init: failed to open logfile "/usr/var/log/glusterfs/glusterfs.log"
- (Permission denied)
-
- You don't have permission to create files in the
-`/usr/var/log/glusterfs' directory. Make sure you are running GlusterFS
-as root. Alternatively, specify a different path for the log file using
-the `--log-file' option (See *note Client::).
-
-6.2 FUSE error messages
-=======================
-
-`modprobe fuse' fails with: "Unknown symbol in module, or unknown
-parameter".
-
- If you are using fuse-2.6.x on Redhat Enterprise Linux Work Station 4
-and Advanced Server 4 with 2.6.9-42.ELlargesmp, 2.6.9-42.ELsmp,
-2.6.9-42.EL kernels and get this error while loading FUSE kernel
-module, you need to apply the following patch.
-
- For fuse-2.6.2:
-
-<http://ftp.gluster.com/pub/gluster/glusterfs/fuse/fuse-2.6.2-rhel-build.patch>
-
- For fuse-2.6.3:
-
-<http://ftp.gluster.com/pub/gluster/glusterfs/fuse/fuse-2.6.3-rhel-build.patch>
-
-6.3 AppArmour and GlusterFS
-===========================
-
-Under OpenSuSE GNU/Linux, the AppArmour security feature does not allow
-GlusterFS to create temporary files or network socket connections even
-while running as root. You will see error messages like `Unable to open
-log file: Operation not permitted' or `Connection refused'. Disabling
-AppArmour using YaST or properly configuring AppArmour to recognize
-`glusterfsd' or `glusterfs'/`fusermount' should solve the problem.
-
-6.4 Reporting a bug
-===================
-
-If you encounter a bug in GlusterFS, please follow the guidelines below
-when reporting it to the mailing list. Be sure to report it! User
-feedback is crucial to the health of the project and we value it highly.
-
-6.4.1 General instructions
---------------------------
-
-When running GlusterFS in a non-production environment, be sure to
-build it with the following command:
-
- $ make CFLAGS='-g -O0 -DDEBUG'
-
- This includes debugging information, which is helpful in getting
-backtraces (see below), and also disables optimization. Enabling
-optimization can result in incorrect line numbers being reported to gdb.
-
-6.4.2 Volume specification files
---------------------------------
-
-Attach all relevant server and client spec files you were using when
-you encountered the bug. Also tell us details of your setup, i.e., how
-many clients and how many servers.
-
-6.4.3 Log files
----------------
-
-Set the loglevel of your client and server programs to DEBUG (by
-passing the -L DEBUG option) and attach the log files with your bug
-report. Obviously, if only the client is failing (for example), you
-only need to send us the client log file.
-
-6.4.4 Backtrace
----------------
-
-If GlusterFS has encountered a segmentation fault or has crashed for
-some other reason, include the backtrace with the bug report. You can
-get the backtrace using the following procedure.
-
- Run the GlusterFS client or server inside gdb.
-
- $ gdb ./glusterfs
- (gdb) set args -f client.spec -N -l/path/to/log/file -LDEBUG /mnt/point
- (gdb) run
-
- Now when the process segfaults, you can get the backtrace by typing:
-
- (gdb) bt
-
- If the GlusterFS process has crashed and dumped a core file (you can
-find this in / if running as a daemon and in the current directory
-otherwise), you can do:
-
- $ gdb /path/to/glusterfs /path/to/core.<pid>
-
- and then get the backtrace.
-
- If the GlusterFS server or client seems to be hung, then you can get
-the backtrace by attaching gdb to the process. First get the `PID' of
-the process (using ps), and then do:
-
- $ gdb ./glusterfs <pid>
-
- Press Ctrl-C to interrupt the process and then generate the
-backtrace.
-
-6.4.5 Reproducing the bug
--------------------------
-
-If the bug is reproducible, please include the steps necessary to do
-so. If the bug is not reproducible, send us the bug report anyway.
-
-6.4.6 Other information
------------------------
-
-If you think it is relevant, also send us the version of FUSE you're
-using, the kernel version, and the platform.
-
-
-File: user-guide.info, Node: GNU Free Documentation Licence, Next: Index, Prev: Troubleshooting, Up: Top
-
-Appendix A GNU Free Documentation Licence
-*****************************************
-
- Version 1.2, November 2002
-
- Copyright (C) 2000,2001,2002 Free Software Foundation, Inc.
- 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
-
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- 0. PREAMBLE
-
- The purpose of this License is to make a manual, textbook, or other
- functional and useful document "free" in the sense of freedom: to
- assure everyone the effective freedom to copy and redistribute it,
- with or without modifying it, either commercially or
- noncommercially. Secondarily, this License preserves for the
- author and publisher a way to get credit for their work, while not
- being considered responsible for modifications made by others.
-
- This License is a kind of "copyleft", which means that derivative
- works of the document must themselves be free in the same sense.
- It complements the GNU General Public License, which is a copyleft
- license designed for free software.
-
- We have designed this License in order to use it for manuals for
- free software, because free software needs free documentation: a
- free program should come with manuals providing the same freedoms
- that the software does. But this License is not limited to
- software manuals; it can be used for any textual work, regardless
- of subject matter or whether it is published as a printed book.
- We recommend this License principally for works whose purpose is
- instruction or reference.
-
- 1. APPLICABILITY AND DEFINITIONS
-
- This License applies to any manual or other work, in any medium,
- that contains a notice placed by the copyright holder saying it
- can be distributed under the terms of this License. Such a notice
- grants a world-wide, royalty-free license, unlimited in duration,
- to use that work under the conditions stated herein. The
- "Document", below, refers to any such manual or work. Any member
- of the public is a licensee, and is addressed as "you". You
- accept the license if you copy, modify or distribute the work in a
- way requiring permission under copyright law.
-
- A "Modified Version" of the Document means any work containing the
- Document or a portion of it, either copied verbatim, or with
- modifications and/or translated into another language.
-
- A "Secondary Section" is a named appendix or a front-matter section
- of the Document that deals exclusively with the relationship of the
- publishers or authors of the Document to the Document's overall
- subject (or to related matters) and contains nothing that could
- fall directly within that overall subject. (Thus, if the Document
- is in part a textbook of mathematics, a Secondary Section may not
- explain any mathematics.) The relationship could be a matter of
- historical connection with the subject or with related matters, or
- of legal, commercial, philosophical, ethical or political position
- regarding them.
-
- The "Invariant Sections" are certain Secondary Sections whose
- titles are designated, as being those of Invariant Sections, in
- the notice that says that the Document is released under this
- License. If a section does not fit the above definition of
- Secondary then it is not allowed to be designated as Invariant.
- The Document may contain zero Invariant Sections. If the Document
- does not identify any Invariant Sections then there are none.
-
- The "Cover Texts" are certain short passages of text that are
- listed, as Front-Cover Texts or Back-Cover Texts, in the notice
- that says that the Document is released under this License. A
- Front-Cover Text may be at most 5 words, and a Back-Cover Text may
- be at most 25 words.
-
- A "Transparent" copy of the Document means a machine-readable copy,
- represented in a format whose specification is available to the
- general public, that is suitable for revising the document
- straightforwardly with generic text editors or (for images
- composed of pixels) generic paint programs or (for drawings) some
- widely available drawing editor, and that is suitable for input to
- text formatters or for automatic translation to a variety of
- formats suitable for input to text formatters. A copy made in an
- otherwise Transparent file format whose markup, or absence of
- markup, has been arranged to thwart or discourage subsequent
- modification by readers is not Transparent. An image format is
- not Transparent if used for any substantial amount of text. A
- copy that is not "Transparent" is called "Opaque".
-
- Examples of suitable formats for Transparent copies include plain
- ASCII without markup, Texinfo input format, LaTeX input format,
- SGML or XML using a publicly available DTD, and
- standard-conforming simple HTML, PostScript or PDF designed for
- human modification. Examples of transparent image formats include
- PNG, XCF and JPG. Opaque formats include proprietary formats that
- can be read and edited only by proprietary word processors, SGML or
- XML for which the DTD and/or processing tools are not generally
- available, and the machine-generated HTML, PostScript or PDF
- produced by some word processors for output purposes only.
-
- The "Title Page" means, for a printed book, the title page itself,
- plus such following pages as are needed to hold, legibly, the
- material this License requires to appear in the title page. For
- works in formats which do not have any title page as such, "Title
- Page" means the text near the most prominent appearance of the
- work's title, preceding the beginning of the body of the text.
-
- A section "Entitled XYZ" means a named subunit of the Document
- whose title either is precisely XYZ or contains XYZ in parentheses
- following text that translates XYZ in another language. (Here XYZ
- stands for a specific section name mentioned below, such as
- "Acknowledgements", "Dedications", "Endorsements", or "History".)
- To "Preserve the Title" of such a section when you modify the
- Document means that it remains a section "Entitled XYZ" according
- to this definition.
-
- The Document may include Warranty Disclaimers next to the notice
- which states that this License applies to the Document. These
- Warranty Disclaimers are considered to be included by reference in
- this License, but only as regards disclaiming warranties: any other
- implication that these Warranty Disclaimers may have is void and
- has no effect on the meaning of this License.
-
- 2. VERBATIM COPYING
-
- You may copy and distribute the Document in any medium, either
- commercially or noncommercially, provided that this License, the
- copyright notices, and the license notice saying this License
- applies to the Document are reproduced in all copies, and that you
- add no other conditions whatsoever to those of this License. You
- may not use technical measures to obstruct or control the reading
- or further copying of the copies you make or distribute. However,
- you may accept compensation in exchange for copies. If you
- distribute a large enough number of copies you must also follow
- the conditions in section 3.
-
- You may also lend copies, under the same conditions stated above,
- and you may publicly display copies.
-
- 3. COPYING IN QUANTITY
-
- If you publish printed copies (or copies in media that commonly
- have printed covers) of the Document, numbering more than 100, and
- the Document's license notice requires Cover Texts, you must
- enclose the copies in covers that carry, clearly and legibly, all
- these Cover Texts: Front-Cover Texts on the front cover, and
- Back-Cover Texts on the back cover. Both covers must also clearly
- and legibly identify you as the publisher of these copies. The
- front cover must present the full title with all words of the
- title equally prominent and visible. You may add other material
- on the covers in addition. Copying with changes limited to the
- covers, as long as they preserve the title of the Document and
- satisfy these conditions, can be treated as verbatim copying in
- other respects.
-
- If the required texts for either cover are too voluminous to fit
- legibly, you should put the first ones listed (as many as fit
- reasonably) on the actual cover, and continue the rest onto
- adjacent pages.
-
- If you publish or distribute Opaque copies of the Document
- numbering more than 100, you must either include a
- machine-readable Transparent copy along with each Opaque copy, or
- state in or with each Opaque copy a computer-network location from
- which the general network-using public has access to download
- using public-standard network protocols a complete Transparent
- copy of the Document, free of added material. If you use the
- latter option, you must take reasonably prudent steps, when you
- begin distribution of Opaque copies in quantity, to ensure that
- this Transparent copy will remain thus accessible at the stated
- location until at least one year after the last time you
- distribute an Opaque copy (directly or through your agents or
- retailers) of that edition to the public.
-
- It is requested, but not required, that you contact the authors of
- the Document well before redistributing any large number of
- copies, to give them a chance to provide you with an updated
- version of the Document.
-
- 4. MODIFICATIONS
-
- You may copy and distribute a Modified Version of the Document
- under the conditions of sections 2 and 3 above, provided that you
- release the Modified Version under precisely this License, with
- the Modified Version filling the role of the Document, thus
- licensing distribution and modification of the Modified Version to
- whoever possesses a copy of it. In addition, you must do these
- things in the Modified Version:
-
- A. Use in the Title Page (and on the covers, if any) a title
- distinct from that of the Document, and from those of
- previous versions (which should, if there were any, be listed
- in the History section of the Document). You may use the
- same title as a previous version if the original publisher of
- that version gives permission.
-
- B. List on the Title Page, as authors, one or more persons or
- entities responsible for authorship of the modifications in
- the Modified Version, together with at least five of the
- principal authors of the Document (all of its principal
- authors, if it has fewer than five), unless they release you
- from this requirement.
-
- C. State on the Title page the name of the publisher of the
- Modified Version, as the publisher.
-
- D. Preserve all the copyright notices of the Document.
-
- E. Add an appropriate copyright notice for your modifications
- adjacent to the other copyright notices.
-
- F. Include, immediately after the copyright notices, a license
- notice giving the public permission to use the Modified
- Version under the terms of this License, in the form shown in
- the Addendum below.
-
- G. Preserve in that license notice the full lists of Invariant
- Sections and required Cover Texts given in the Document's
- license notice.
-
- H. Include an unaltered copy of this License.
-
- I. Preserve the section Entitled "History", Preserve its Title,
- and add to it an item stating at least the title, year, new
- authors, and publisher of the Modified Version as given on
- the Title Page. If there is no section Entitled "History" in
- the Document, create one stating the title, year, authors,
- and publisher of the Document as given on its Title Page,
- then add an item describing the Modified Version as stated in
- the previous sentence.
-
- J. Preserve the network location, if any, given in the Document
- for public access to a Transparent copy of the Document, and
- likewise the network locations given in the Document for
- previous versions it was based on. These may be placed in
- the "History" section. You may omit a network location for a
- work that was published at least four years before the
- Document itself, or if the original publisher of the version
- it refers to gives permission.
-
- K. For any section Entitled "Acknowledgements" or "Dedications",
- Preserve the Title of the section, and preserve in the
- section all the substance and tone of each of the contributor
- acknowledgements and/or dedications given therein.
-
- L. Preserve all the Invariant Sections of the Document,
- unaltered in their text and in their titles. Section numbers
- or the equivalent are not considered part of the section
- titles.
-
- M. Delete any section Entitled "Endorsements". Such a section
- may not be included in the Modified Version.
-
- N. Do not retitle any existing section to be Entitled
- "Endorsements" or to conflict in title with any Invariant
- Section.
-
- O. Preserve any Warranty Disclaimers.
-
- If the Modified Version includes new front-matter sections or
- appendices that qualify as Secondary Sections and contain no
- material copied from the Document, you may at your option
- designate some or all of these sections as invariant. To do this,
- add their titles to the list of Invariant Sections in the Modified
- Version's license notice. These titles must be distinct from any
- other section titles.
-
- You may add a section Entitled "Endorsements", provided it contains
- nothing but endorsements of your Modified Version by various
- parties--for example, statements of peer review or that the text
- has been approved by an organization as the authoritative
- definition of a standard.
-
- You may add a passage of up to five words as a Front-Cover Text,
- and a passage of up to 25 words as a Back-Cover Text, to the end
- of the list of Cover Texts in the Modified Version. Only one
- passage of Front-Cover Text and one of Back-Cover Text may be
- added by (or through arrangements made by) any one entity. If the
- Document already includes a cover text for the same cover,
- previously added by you or by arrangement made by the same entity
- you are acting on behalf of, you may not add another; but you may
- replace the old one, on explicit permission from the previous
- publisher that added the old one.
-
- The author(s) and publisher(s) of the Document do not by this
- License give permission to use their names for publicity for or to
- assert or imply endorsement of any Modified Version.
-
- 5. COMBINING DOCUMENTS
-
- You may combine the Document with other documents released under
- this License, under the terms defined in section 4 above for
- modified versions, provided that you include in the combination
- all of the Invariant Sections of all of the original documents,
- unmodified, and list them all as Invariant Sections of your
- combined work in its license notice, and that you preserve all
- their Warranty Disclaimers.
-
- The combined work need only contain one copy of this License, and
- multiple identical Invariant Sections may be replaced with a single
- copy. If there are multiple Invariant Sections with the same name
- but different contents, make the title of each such section unique
- by adding at the end of it, in parentheses, the name of the
- original author or publisher of that section if known, or else a
- unique number. Make the same adjustment to the section titles in
- the list of Invariant Sections in the license notice of the
- combined work.
-
- In the combination, you must combine any sections Entitled
- "History" in the various original documents, forming one section
- Entitled "History"; likewise combine any sections Entitled
- "Acknowledgements", and any sections Entitled "Dedications". You
- must delete all sections Entitled "Endorsements."
-
- 6. COLLECTIONS OF DOCUMENTS
-
- You may make a collection consisting of the Document and other
- documents released under this License, and replace the individual
- copies of this License in the various documents with a single copy
- that is included in the collection, provided that you follow the
- rules of this License for verbatim copying of each of the
- documents in all other respects.
-
- You may extract a single document from such a collection, and
- distribute it individually under this License, provided you insert
- a copy of this License into the extracted document, and follow
- this License in all other respects regarding verbatim copying of
- that document.
-
- 7. AGGREGATION WITH INDEPENDENT WORKS
-
- A compilation of the Document or its derivatives with other
- separate and independent documents or works, in or on a volume of
- a storage or distribution medium, is called an "aggregate" if the
- copyright resulting from the compilation is not used to limit the
- legal rights of the compilation's users beyond what the individual
- works permit. When the Document is included in an aggregate, this
- License does not apply to the other works in the aggregate which
- are not themselves derivative works of the Document.
-
- If the Cover Text requirement of section 3 is applicable to these
- copies of the Document, then if the Document is less than one half
- of the entire aggregate, the Document's Cover Texts may be placed
- on covers that bracket the Document within the aggregate, or the
- electronic equivalent of covers if the Document is in electronic
- form. Otherwise they must appear on printed covers that bracket
- the whole aggregate.
-
- 8. TRANSLATION
-
- Translation is considered a kind of modification, so you may
- distribute translations of the Document under the terms of section
- 4. Replacing Invariant Sections with translations requires special
- permission from their copyright holders, but you may include
- translations of some or all Invariant Sections in addition to the
- original versions of these Invariant Sections. You may include a
- translation of this License, and all the license notices in the
- Document, and any Warranty Disclaimers, provided that you also
- include the original English version of this License and the
- original versions of those notices and disclaimers. In case of a
- disagreement between the translation and the original version of
- this License or a notice or disclaimer, the original version will
- prevail.
-
- If a section in the Document is Entitled "Acknowledgements",
- "Dedications", or "History", the requirement (section 4) to
- Preserve its Title (section 1) will typically require changing the
- actual title.
-
- 9. TERMINATION
-
- You may not copy, modify, sublicense, or distribute the Document
- except as expressly provided for under this License. Any other
- attempt to copy, modify, sublicense or distribute the Document is
- void, and will automatically terminate your rights under this
- License. However, parties who have received copies, or rights,
- from you under this License will not have their licenses
- terminated so long as such parties remain in full compliance.
-
- 10. FUTURE REVISIONS OF THIS LICENSE
-
- The Free Software Foundation may publish new, revised versions of
- the GNU Free Documentation License from time to time. Such new
- versions will be similar in spirit to the present version, but may
- differ in detail to address new problems or concerns. See
- `http://www.gnu.org/copyleft/'.
-
- Each version of the License is given a distinguishing version
- number. If the Document specifies that a particular numbered
- version of this License "or any later version" applies to it, you
- have the option of following the terms and conditions either of
- that specified version or of any later version that has been
- published (not as a draft) by the Free Software Foundation. If
- the Document does not specify a version number of this License,
- you may choose any version ever published (not as a draft) by the
- Free Software Foundation.
-
-A.0.1 ADDENDUM: How to use this License for your documents
-----------------------------------------------------------
-
-To use this License in a document you have written, include a copy of
-the License in the document and put the following copyright and license
-notices just after the title page:
-
- Copyright (C) YEAR YOUR NAME.
- Permission is granted to copy, distribute and/or modify this document
- under the terms of the GNU Free Documentation License, Version 1.2
- or any later version published by the Free Software Foundation;
- with no Invariant Sections, no Front-Cover Texts, and no Back-Cover
- Texts. A copy of the license is included in the section entitled ``GNU
- Free Documentation License''.
-
- If you have Invariant Sections, Front-Cover Texts and Back-Cover
-Texts, replace the "with...Texts." line with this:
-
- with the Invariant Sections being LIST THEIR TITLES, with
- the Front-Cover Texts being LIST, and with the Back-Cover Texts
- being LIST.
-
- If you have Invariant Sections without Cover Texts, or some other
-combination of the three, merge those two alternatives to suit the
-situation.
-
- If your document contains nontrivial examples of program code, we
-recommend releasing these examples in parallel under your choice of
-free software license, such as the GNU General Public License, to
-permit their use in free software.
-
-
-File: user-guide.info, Node: Index, Prev: GNU Free Documentation Licence, Up: Top
-
-Index
-*****
-
-
-* Menu:
-
-* alu (scheduler): Unify. (line 49)
-* AppArmour: Troubleshooting. (line 96)
-* arch: Getting GlusterFS. (line 6)
-* booster: Booster. (line 6)
-* commercial support: Introduction. (line 36)
-* DNS round robin: Transport modules. (line 29)
-* fcntl: POSIX Locks. (line 6)
-* FDL, GNU Free Documentation License: GNU Free Documentation Licence.
- (line 6)
-* fixed-id (translator): Fixed ID. (line 6)
-* GlusterFS client: Client. (line 6)
-* GlusterFS mailing list: Introduction. (line 28)
-* GlusterFS server: Server. (line 6)
-* infiniband transport: Transport modules. (line 58)
-* InfiniBand, installation: Pre requisites. (line 51)
-* io-cache (translator): IO Cache. (line 6)
-* io-threads (translator): IO Threads. (line 6)
-* IRC channel, #gluster: Introduction. (line 31)
-* libibverbs: Pre requisites. (line 51)
-* namespace: Unify. (line 207)
-* nufa (scheduler): Unify. (line 175)
-* OpenSuSE: Troubleshooting. (line 96)
-* posix-locks (translator): POSIX Locks. (line 6)
-* random (scheduler): Unify. (line 159)
-* read-ahead (translator): Read Ahead. (line 6)
-* record locking: POSIX Locks. (line 6)
-* Redhat Enterprise Linux: Troubleshooting. (line 78)
-* Replicate: Replicate. (line 6)
-* rot-13 (translator): ROT-13. (line 6)
-* rr (scheduler): Unify. (line 138)
-* scheduler (unify): Unify. (line 6)
-* self heal (replicate): Replicate. (line 46)
-* self heal (unify): Unify. (line 223)
-* stripe (translator): Stripe. (line 6)
-* trace (translator): Trace. (line 6)
-* unify (translator): Unify. (line 6)
-* unify invariants: Unify. (line 16)
-* write-behind (translator): Write Behind. (line 6)
-* Gluster, Inc.: Introduction. (line 36)
-
-
-
-Tag Table:
-Node: Top704
-Node: Acknowledgements2304
-Node: Introduction3214
-Node: Installation and Invocation4649
-Node: Pre requisites4933
-Node: Getting GlusterFS7023
-Ref: Getting GlusterFS-Footnote-17809
-Node: Building7857
-Node: Running GlusterFS9559
-Node: Server9770
-Node: Client11358
-Node: A Tutorial Introduction13564
-Node: Concepts17101
-Node: Filesystems in Userspace17316
-Node: Translator18457
-Node: Volume specification file21160
-Node: Translators23632
-Node: Storage Translators24201
-Ref: Storage Translators-Footnote-125008
-Node: POSIX25142
-Node: BDB25765
-Node: Client and Server Translators26822
-Node: Transport modules27298
-Node: Client protocol31445
-Node: Server protocol32384
-Node: Clustering Translators33373
-Node: Unify34260
-Ref: Unify-Footnote-143859
-Node: Replicate43951
-Node: Stripe49006
-Node: Performance Translators50164
-Node: Read Ahead50438
-Node: Write Behind52170
-Node: IO Threads53579
-Node: IO Cache54367
-Node: Booster55691
-Node: Features Translators57105
-Node: POSIX Locks57333
-Node: Fixed ID58650
-Node: Miscellaneous Translators59136
-Node: ROT-1359334
-Node: Trace60013
-Node: Usage Scenarios61282
-Ref: Usage Scenarios-Footnote-167215
-Node: Troubleshooting67290
-Node: GNU Free Documentation Licence73638
-Node: Index96087
-
-End Tag Table
diff --git a/doc/legacy/user-guide.pdf b/doc/legacy/user-guide.pdf
deleted file mode 100644
index ed7bd2a9907..00000000000
--- a/doc/legacy/user-guide.pdf
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/user-guide.texi b/doc/legacy/user-guide.texi
deleted file mode 100644
index 31ff9cf304e..00000000000
--- a/doc/legacy/user-guide.texi
+++ /dev/null
@@ -1,2246 +0,0 @@
-\input texinfo
-@setfilename user-guide.info
-@settitle GlusterFS 2.0 User Guide
-@afourpaper
-
-@direntry
-* GlusterFS: (user-guide). GlusterFS distributed filesystem user guide
-@end direntry
-
-@copying
-This is the user manual for GlusterFS 2.0.
-
-Copyright @copyright{} 2007-2011 @email{@b{Gluster}} , Inc. Permission is granted to
-copy, distribute and/or modify this document under the terms of the
-@acronym{GNU} Free Documentation License, Version 1.2 or any later
-version published by the Free Software Foundation; with no Invariant
-Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the
-license is included in the chapter entitled ``@acronym{GNU} Free
-Documentation License''.
-@end copying
-
-@titlepage
-@title GlusterFS 2.0 User Guide [DRAFT]
-@subtitle January 15, 2008
-@author http://gluster.org/core-team.php
-@author @email{@b{Gluster}}
-@page
-@vskip 0pt plus 1filll
-@insertcopying
-@end titlepage
-
-@c Info stuff
-@ifnottex
-@node Top
-@top GlusterFS 2.0 User Guide
-
-@insertcopying
-@menu
-* Acknowledgements::
-* Introduction::
-* Installation and Invocation::
-* Concepts::
-* Translators::
-* Usage Scenarios::
-* Troubleshooting::
-* GNU Free Documentation Licence::
-* Index::
-
-@detailmenu
- --- The Detailed Node Listing ---
-
-Installation and Invocation
-
-* Pre requisites::
-* Getting GlusterFS::
-* Building::
-* Running GlusterFS::
-* A Tutorial Introduction::
-
-Running GlusterFS
-
-* Server::
-* Client::
-
-Concepts
-
-* Filesystems in Userspace::
-* Translator::
-* Volume specification file::
-
-Translators
-
-* Storage Translators::
-* Client and Server Translators::
-* Clustering Translators::
-* Performance Translators::
-* Features Translators::
-
-Storage Translators
-
-* POSIX::
-
-Client and Server Translators
-
-* Transport modules::
-* Client protocol::
-* Server protocol::
-
-Clustering Translators
-
-* Unify::
-* Replicate::
-* Stripe::
-
-Performance Translators
-
-* Read Ahead::
-* Write Behind::
-* IO Threads::
-* IO Cache::
-
-Features Translators
-
-* POSIX Locks::
-* Fixed ID::
-
-Miscellaneous Translators
-
-* ROT-13::
-* Trace::
-
-@end detailmenu
-@end menu
-
-@end ifnottex
-@c Info stuff end
-
-@contents
-
-@node Acknowledgements
-@unnumbered Acknowledgements
-GlusterFS continues to be a wonderful and enriching experience for all
-of us involved.
-
-GlusterFS development would not have been possible at this pace if
-not for our enthusiastic users. People from around the world have
-helped us with bug reports, performance numbers, and feature suggestions.
-A huge thanks to them all.
-
-Matthew Paine - for RPMs & general enthu
-
-Leonardo Rodrigues de Mello - for DEBs
-
-Julian Perez & Adam D'Auria - for multi-server tutorial
-
-Paul England - for HA spec
-
-Brent Nelson - for many bug reports
-
-Jacques Mattheij - for Europe mirror.
-
-Patrick Negri - for TCP non-blocking connect.
-@flushright
-http://gluster.org/core-team.php (@email{list-hacking@@gluster.com})
-@email{@b{Gluster}}
-@end flushright
-
-@node Introduction
-@chapter Introduction
-
-GlusterFS is a distributed filesystem. It works at the file level,
-not block level.
-
-A network filesystem is one which allows us to access remote files. A
-distributed filesystem is one that stores data on multiple machines
-and makes them all appear to be a part of the same filesystem.
-
-Need for distributed filesystems
-
-@itemize @bullet
-@item Scalability: A distributed filesystem allows us to store more data than what can be stored on a single machine.
-
-@item Redundancy: We might want to replicate crucial data on to several machines.
-
-@item Uniform access: One can mount a remote volume (for example your home directory) from any machine and access the same data.
-@end itemize
-
-@section Contacting us
-You can reach us through the mailing list @strong{gluster-devel}
-(@email{gluster-devel@@nongnu.org}).
-@cindex GlusterFS mailing list
-
-You can also find many of the developers on @acronym{IRC}, on the @code{#gluster}
-channel on Freenode (@indicateurl{irc.freenode.net}).
-@cindex IRC channel, #gluster
-
-The GlusterFS documentation wiki is also useful: @*
-@indicateurl{http://gluster.org/docs/index.php/GlusterFS}
-
-For commercial support, you can contact @email{@b{Gluster}} at:
-@cindex commercial support
-@cindex Gluster, Inc.
-
-@display
-3194 Winding Vista Common
-Fremont, CA 94539
-USA.
-
-Phone: +1 (510) 354 6801
-Toll free: +1 (888) 813 6309
-Fax: +1 (510) 372 0604
-@end display
-
-You can also email us at @email{support@@gluster.com}.
-
-@node Installation and Invocation
-@chapter Installation and Invocation
-
-@menu
-* Pre requisites::
-* Getting GlusterFS::
-* Building::
-* Running GlusterFS::
-* A Tutorial Introduction::
-@end menu
-
-@node Pre requisites
-@section Pre requisites
-
-Before installing GlusterFS make sure you have the
-following components installed.
-
-@subsection @acronym{FUSE}
-GlusterFS now has built-in support for the @acronym{FUSE} protocol.
-You need a kernel with @acronym{FUSE} support to mount GlusterFS.
-You do not need the @acronym{FUSE} package (library and utilities),
-but be aware of the following issues:
-
-@itemize
-@item If you want unprivileged users to be able to mount GlusterFS filesystems,
-you need a recent version of the @command{fusermount} utility. You already have
-it if you have @acronym{FUSE} version 2.7.0 or higher installed; if that's not
-the case, one will be compiled along with GlusterFS if you pass
-@command{--enable-fusermount} to the @command{configure} script.
-@item You need to ensure @acronym{FUSE} support is configured properly on your
-system. In detail:
-@itemize
-@item If your kernel has @acronym{FUSE} as a loadable module, make sure it's
-loaded.
-@item Create @command{/dev/fuse} (major 10, minor 229) either by means of udev
-rules or by hand, as shown in the example after this list.
-@item Optionally, if you want runtime control over your @acronym{FUSE} mounts,
-mount the fusectl auxiliary filesystem:
-
-@example
-# mount -t fusectl none /sys/fs/fuse/connections
-@end example
-@end itemize
-
-The @acronym{FUSE} packages shipped by the various distributions usually take
-care of these things, so the easiest way to get the above tasks handled is still
-installing the @acronym{FUSE} package(s).
-@end itemize
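-
-If you need to load the module and create the device node by hand, the
-following commands are one way to do it (device numbers as given in the list
-above):
-
-@example
-# modprobe fuse
-# mknod /dev/fuse c 10 229
-@end example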
-
-To get the best performance from GlusterFS, it is recommended that you use
-our patched version of the @acronym{FUSE} kernel module. See Patched FUSE for details.
-
-@subsection Patched FUSE
-
-The GlusterFS project maintains a patched version of @acronym{FUSE} meant to be used
-with GlusterFS. The patches increase GlusterFS performance. It is recommended that
-all users use the patched @acronym{FUSE}.
-
-The patched @acronym{FUSE} tarball can be downloaded from:
-
-@indicateurl{ftp://ftp.gluster.com/pub/gluster/glusterfs/fuse/}
-
-The specific changes made to @acronym{FUSE} are:
-
-@itemize
-@item The communication channel size between @acronym{FUSE} kernel module and GlusterFS has been increased to 1MB, permitting large reads and writes to be sent in bigger chunks.
-
-@item The kernel's read-ahead boundary has been extended up to 1MB.
-
-@item Block size returned in the @command{stat()}/@command{fstat()} calls tuned to 1MB, to make cp and similar commands perform I/O using that block size.
-
-@item @command{flock()} locking support has been added (although some rework in GlusterFS is needed for perfect compliance).
-@end itemize
-
-@subsection libibverbs (optional)
-@cindex InfiniBand, installation
-@cindex libibverbs
-This is only needed if you want GlusterFS to use InfiniBand as the
-interconnect mechanism between server and client. You can get it from:
-
-@indicateurl{http://www.openfabrics.org/downloads.htm}.
-
-@subsection Bison and Flex
-These should be already installed on most Linux systems. If not, use your distribution's
-normal software installation procedures to install them. Make sure you install the
-relevant developer packages also.
-
-@node Getting GlusterFS
-@section Getting GlusterFS
-@cindex arch
-There are many ways to get hold of GlusterFS. For a production deployment,
-the recommended method is to download the latest release tarball.
-Release tarballs are available at: @indicateurl{http://gluster.org/download.php}.
-
-If you want the bleeding edge development source, you can get them
-from the Git
-@footnote{@indicateurl{http://git-scm.com}}
-repository. First you must install Git itself. Then
-you can check out the source:
-
-@example
-$ git clone git://git.sv.gnu.org/gluster.git glusterfs
-@end example
-
-@node Building
-@section Building
-You can skip this section if you're installing from @acronym{RPM}s
-or @acronym{DEB}s.
-
-GlusterFS uses the Autotools mechanism to build. As such, the procedure
-is straightforward. First, change into the GlusterFS source directory.
-
-@example
-$ cd glusterfs-<version>
-@end example
-
-If you checked out the source from the Git repository, you'll need
-to run @command{./autogen.sh} first. Note that you'll need to have
-Autoconf and Automake installed for this.
-
-Run @command{configure}.
-
-@example
-$ ./configure
-@end example
-
-The configure script accepts the following options:
-
-@cartouche
-@table @code
-
-@item --disable-ibverbs
-Disable the InfiniBand transport mechanism.
-
-@item --disable-fuse-client
-Disable the @acronym{FUSE} client.
-
-@item --disable-server
-Disable building of the GlusterFS server.
-
-@item --disable-bdb
-Disable building of Berkeley DB based storage translator.
-
-@item --disable-mod_glusterfs
-Disable building of Apache/lighttpd glusterfs plugins.
-
-@item --disable-epoll
-Use poll instead of epoll.
-
-@item --disable-libglusterfsclient
-Disable building of libglusterfsclient
-
-@item --enable-fusermount
-Build fusermount
-
-@end table
-@end cartouche
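-
-For example, to configure a build without InfiniBand support under a custom
-prefix (the prefix shown is only illustrative), you might run:
-
-@example
-$ ./configure --disable-ibverbs --prefix=/opt/glusterfs
-@end example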
-
-Build and install GlusterFS.
-
-@example
-# make install
-@end example
-
-The binaries (@command{glusterfsd} and @command{glusterfs}) will be by
-default installed in @command{/usr/local/sbin/}. Translator,
-scheduler, and transport shared libraries will be installed in
-@command{/usr/local/lib/glusterfs/<version>/}. Sample volume
-specification files will be in @command{/usr/local/etc/glusterfs/}.
-This document itself can be found in
-@command{/usr/local/share/doc/glusterfs/}. If you passed the @command{--prefix}
-argument to the configure script, then replace @command{/usr/local} in the preceding
-paths with the prefix.
-
-@node Running GlusterFS
-@section Running GlusterFS
-
-@menu
-* Server::
-* Client::
-@end menu
-
-@node Server
-@subsection Server
-@cindex GlusterFS server
-
-The GlusterFS server is necessary to export storage volumes to remote clients
-(See @ref{Server protocol} for more info). This section documents the invocation
-of the GlusterFS server program and all the command-line options accepted by it.
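-
-For instance, a typical invocation using the options described below might look
-like this (the paths are only examples):
-
-@example
-# glusterfsd -f /tmp/glusterfsd.vol -l /var/log/glusterfsd.log -L WARNING
-@end example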
-
-@cartouche
-@table @code
-Basic Options
-@item -f, --volfile=<path>
- Use the volume file as the volume specification.
-
-@item -s, --volfile-server=<hostname>
- Server to get volume file from. This option overrides --volfile option.
-
-@item -l, --log-file=<path>
- Specify the path for the log file.
-
-@item -L, --log-level=<level>
- Set the log level for the server. Log level should be one of @acronym{DEBUG},
-@acronym{WARNING}, @acronym{ERROR}, @acronym{CRITICAL}, or @acronym{NONE}.
-
-Advanced Options
-@item --debug
- Run in debug mode. This option sets --no-daemon, --log-level to DEBUG and
- --log-file to console.
-
-@item -N, --no-daemon
- Run glusterfsd as a foreground process.
-
-@item -p, --pid-file=<path>
- Path for the @acronym{PID} file.
-
-@item --volfile-id=<key>
- 'key' of the volfile to be fetched from server.
-
-@item --volfile-server-port=<port-number>
- Listening port number of volfile server.
-
-@item --volfile-server-transport=[tcp|ib-verbs]
- Transport type to get volfile from server. [default: @command{tcp}]
-
-@item --xlator-options=<volume-name.option=value>
- Add/override a translator option for a volume with specified value.
-
-Miscellaneous Options
-@item -?, --help
- Show this help text.
-
-@item --usage
- Display a short usage message.
-
-@item -V, --version
- Show version information.
-@end table
-@end cartouche
-
-@node Client
-@subsection Client
-@cindex GlusterFS client
-
-The GlusterFS client process is necessary to access remote storage volumes and
-mount them locally using @acronym{FUSE}. This section documents the invocation of the
-client process and all its command-line arguments.
-
-@example
- # glusterfs [options] <mountpoint>
-@end example
-
-The @command{mountpoint} is the directory where you want the GlusterFS
-filesystem to appear. Example:
-
-@example
- # glusterfs -f /usr/local/etc/glusterfs-client.vol /mnt
-@end example
-
-The command-line options are detailed below.
-
-@tex
-\vfill
-@end tex
-@page
-
-@cartouche
-@table @code
-
-Basic Options
-@item -f, --volfile=<path>
- Use the volume file as the volume specification.
-
-@item -s, --volfile-server=<hostname>
- Server to get volume file from. This option overrides --volfile option.
-
-@item -l, --log-file=<path>
- Specify the path for the log file.
-
-@item -L, --log-level=<level>
- Set the log level for the client. Log level should be one of @acronym{DEBUG},
-@acronym{WARNING}, @acronym{ERROR}, @acronym{CRITICAL}, or @acronym{NONE}.
-
-Advanced Options
-@item --debug
- Run in debug mode. This option sets --no-daemon, --log-level to DEBUG and
- --log-file to console.
-
-@item -N, --no-daemon
- Run @command{glusterfs} as a foreground process.
-
-@item -p, --pid-file=<path>
- Path for the @acronym{PID} file.
-
-@item --volfile-id=<key>
- 'key' of the volfile to be fetched from server.
-
-@item --volfile-server-port=<port-number>
- Listening port number of volfile server.
-
-@item --volfile-server-transport=[tcp|ib-verbs]
- Transport type to get volfile from server. [default: @command{tcp}]
-
-@item --xlator-options=<volume-name.option=value>
- Add/override a translator option for a volume with specified value.
-
-@item --volume-name=<volume name>
- Volume name in client spec to use. Defaults to the root volume.
-
-@acronym{FUSE} Options
-@item --attribute-timeout=<n>
- Attribute timeout for inodes in the kernel, in seconds. Defaults to 1 second.
-
-@item --disable-direct-io-mode
- Disable direct @acronym{I/O} mode in @acronym{FUSE} kernel module. This is set
- automatically if kernel supports big writes (>= 2.6.26).
-
-@item -e, --entry-timeout=<n>
- Entry timeout for directory entries in the kernel, in seconds.
- Defaults to 1 second.
-
-Miscellaneous Options
-@item -?, --help
- Show this help information.
-
-@item -V, --version
- Show version information.
-@end table
-@end cartouche
-
-@node A Tutorial Introduction
-@section A Tutorial Introduction
-
-This section will show you how to quickly get GlusterFS up and running. We'll
-configure GlusterFS as a simple network filesystem, with one server and one client.
-In this mode of usage, GlusterFS can serve as a replacement for NFS.
-
-We'll make use of two machines; call them @emph{server} and
-@emph{client} (If you don't want to set up two machines, just run
-everything that follows on the same machine). In the examples that
-follow, the shell prompts will use these names to clarify the machine
-on which the command is being run. For example, a command that should
-be run on the server will be shown with the prompt:
-
-@example
-[root@@server]#
-@end example
-
-Our goal is to make a directory on the @emph{server} (say, @command{/export})
-accessible to the @emph{client}.
-
-First of all, get GlusterFS installed on both the machines, as described in the
-previous sections. Make sure you have the @acronym{FUSE} kernel module loaded. You
-can ensure this by running:
-
-@example
-[root@@server]# modprobe fuse
-@end example
-
-Before we can run the GlusterFS client or server programs, we need to write
-two files called @emph{volume specifications} (equivalently referred to as @emph{volfiles}).
-The volfile describes the @emph{translator tree} on a node. The next chapter will
-explain the concepts of `translator' and `volume specification' in detail. For now,
-just assume that the volfile is like an NFS @command{/etc/exports} file.
-
-On the server, create a text file somewhere (we'll assume the path
-@command{/tmp/glusterfsd.vol}) with the following contents.
-
-@cartouche
-@example
-volume colon-o
- type storage/posix
- option directory /export
-end-volume
-
-volume server
- type protocol/server
- subvolumes colon-o
- option transport-type tcp
- option auth.addr.colon-o.allow *
-end-volume
-@end example
-@end cartouche
-
-A brief explanation of the file's contents follows. The first section defines a storage
-volume, named ``colon-o'' (the volume names are arbitrary), which exports the
-@command{/export} directory. The second section defines options for the translator
-which will make the storage volume accessible remotely. It specifies @command{colon-o} as
-a subvolume. This defines the @emph{translator tree}, about which more will be said
-in the next chapter. The two options specify that the @acronym{TCP} protocol is to be
-used (as opposed to InfiniBand, for example), and that access to the storage volume
-is to be provided to clients with any @acronym{IP} address at all. If you wanted to
-restrict access to this server to only your subnet for example, you'd specify
-something like @command{192.168.1.*} in the second option line.
-
-On the client machine, create the following text file (again, we'll assume
-the path to be @command{/tmp/glusterfs-client.vol}). Replace
-@emph{server-ip-address} with the @acronym{IP} address of your server machine. If you
-are doing all this on a single machine, use @command{127.0.0.1}.
-
-@cartouche
-@example
-volume client
- type protocol/client
- option transport-type tcp
- option remote-host @emph{server-ip-address}
- option remote-subvolume colon-o
-end-volume
-@end example
-@end cartouche
-
-Now we need to start both the server and client programs. To start the server:
-
-@example
-[root@@server]# glusterfsd -f /tmp/glusterfsd.vol
-@end example
-
-To start the client:
-
-@example
-[root@@client]# glusterfs -f /tmp/glusterfs-client.vol /mnt/glusterfs
-@end example
-
-You should now be able to see the files under the server's @command{/export} directory
-in the @command{/mnt/glusterfs} directory on the client. That's it; GlusterFS is now
-working as a network file system.
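-
-As a quick check (the file name here is arbitrary), create a file through the
-mount point on the client and verify that it shows up in the server's export
-directory:
-
-@example
-[root@@client]# echo hello > /mnt/glusterfs/test.txt
-[root@@server]# ls /export
-@end example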
-
-@node Concepts
-@chapter Concepts
-
-@menu
-* Filesystems in Userspace::
-* Translator::
-* Volume specification file::
-@end menu
-
-@node Filesystems in Userspace
-@section Filesystems in Userspace
-
-A filesystem is usually implemented in kernel space. Kernel space
-development is much harder than userspace development. @acronym{FUSE}
-is a kernel module/library that allows us to write a filesystem
-completely in userspace.
-
-@acronym{FUSE} consists of a kernel module which interacts with the userspace
-implementation using a device file @code{/dev/fuse}. When a process
-makes a syscall on a @acronym{FUSE} filesystem, @acronym{VFS} hands the request to the
-@acronym{FUSE} module, which writes the request to @code{/dev/fuse}. The
-userspace implementation polls @code{/dev/fuse}, and when a request arrives,
-processes it and writes the result back to @code{/dev/fuse}. The kernel then
-reads from the device file and returns the result to the user process.
-
-In case of GlusterFS, the userspace program is the GlusterFS client.
-The control flow is shown in the diagram below. The GlusterFS client
-services the request by sending it to the server, which in turn
-hands it to the local @acronym{POSIX} filesystem.
-
-@center @image{fuse,44pc,,,.pdf}
-@center Fig 1. Control flow in GlusterFS
-
-@node Translator
-@section Translator
-
-The @emph{translator} is the most important concept in GlusterFS. In
-fact, GlusterFS is nothing but a collection of translators working
-together, forming a translator @emph{tree}.
-
-The idea of a translator is perhaps best understood using an
-analogy. Consider the @acronym{VFS} in the Linux kernel. The
-@acronym{VFS} abstracts the various filesystem implementations (such
-as @acronym{EXT3}, ReiserFS, @acronym{XFS}, etc.) supported by the
-kernel. When an application calls the kernel to perform an operation
-on a file, the kernel passes the request on to the appropriate
-filesystem implementation.
-
-For example, let's say there are two partitions on a Linux machine:
-@command{/}, which is an @acronym{EXT3} partition, and @command{/usr},
-which is a ReiserFS partition. Now if an application wants to open a
-file called, say, @command{/etc/fstab}, then the kernel will
-internally pass the request to the @acronym{EXT3} implementation. If
-on the other hand, an application wants to read a file called
-@command{/usr/src/linux/CREDITS}, then the kernel will call upon the
-ReiserFS implementation to do the job.
-
-The ``filesystem implementation'' objects are analogous to GlusterFS
-translators. A GlusterFS translator implements all the filesystem
-operations. Whereas in @acronym{VFS} there is a two-level tree (with
-the kernel at the root and all the filesystem implementations as its
-children), in GlusterFS there exists a more elaborate tree structure.
-
-We can now define translators more precisely. A GlusterFS translator
-is a shared object (@command{.so}) that implements every filesystem
-call. GlusterFS translators can be arranged in an arbitrary tree
-structure (subject to constraints imposed by the translators). When
-GlusterFS receives a filesystem call, it passes it on to the
-translator at the root of the translator tree. The root translator may
-in turn pass it on to any or all of its children, and so on, until the
-leaf nodes are reached. The result of a filesystem call is
-communicated in the reverse fashion, from the leaf nodes up to the
-root node, and then on to the application.
-
-So what might a translator tree look like?
-
-@tex
-\vfill
-@end tex
-@page
-
-@center @image{xlator,44pc,,,.pdf}
-@center Fig 2. A sample translator tree
-
-The diagram depicts three servers and one GlusterFS client. It is important
-to note that conceptually, the translator tree spans machine boundaries.
-Thus, the client machine in the diagram, @command{10.0.0.1}, can access
-the aggregated storage of the filesystems on the server machines @command{10.0.0.2},
-@command{10.0.0.3}, and @command{10.0.0.4}. The translator diagram will make more
-sense once you've read the next chapter and understood the functions of the
-various translators.
-
-@node Volume specification file
-@section Volume specification file
-The volume specification file describes the translator tree for both the
-server and client programs.
-
-A volume specification file is a sequence of volume definitions.
-The syntax of a volume definition is explained below:
-
-@cartouche
-@example
-@strong{volume} @emph{volume-name}
- @strong{type} @emph{translator-name}
- @strong{option} @emph{option-name} @emph{option-value}
- @dots{}
- @strong{subvolumes} @emph{subvolume1} @emph{subvolume2} @dots{}
-@strong{end-volume}
-@end example
-
-@dots{}
-@end cartouche
-
-@table @asis
-@item @emph{volume-name}
- An identifier for the volume. This is just a human-readable name,
-and can contain alphanumeric characters and hyphens. For instance, ``storage-1'', ``colon-o'',
-or ``forty-two''.
-
-@item @emph{translator-name}
- Name of one of the available translators. Example: @command{protocol/client},
-@command{cluster/unify}.
-
-@item @emph{option-name}
- Name of a valid option for the translator.
-
-@item @emph{option-value}
- Value for the option. Everything following the ``option'' keyword to the end of the
-line is considered the value; it is up to the translator to parse it.
-
-@item @emph{subvolume1}, @emph{subvolume2}, @dots{}
- Volume names of sub-volumes. The sub-volumes must already have been defined earlier
-in the file.
-@end table
-
-There are a few rules you must follow when writing a volume specification file:
-
-@itemize
-@item Everything following a `@command{#}' is considered a comment and is ignored. Blank lines are also ignored.
-@item All names and keywords are case-sensitive.
-@item The order of options inside a volume definition does not matter.
-@item An option value may not span multiple lines.
-@item If an option is not specified, it will assume its default value.
-@item A sub-volume must have already been defined before it can be referenced. This means you have to write the specification file ``bottom-up'', starting from the leaf nodes of the translator tree and moving up to the root.
-@end itemize
-
-A simple example volume specification file is shown below:
-
-@cartouche
-@example
-# This is a comment line
-volume client
- type protocol/client
- option transport-type tcp
- option remote-host localhost # Also a comment
- option remote-subvolume brick
-# The subvolumes line may be absent
-end-volume
-
-volume iot
- type performance/io-threads
- option thread-count 4
- subvolumes client
-end-volume
-
-volume wb
- type performance/write-behind
- subvolumes iot
-end-volume
-@end example
-@end cartouche
-
-@node Translators
-@chapter Translators
-
-@menu
-* Storage Translators::
-* Client and Server Translators::
-* Clustering Translators::
-* Performance Translators::
-* Features Translators::
-* Miscellaneous Translators::
-@end menu
-
-This chapter documents all the available GlusterFS translators in detail.
-Each translator section will show its name (for example, @command{cluster/unify}),
-briefly describe its purpose and workings, and list every option accepted by
-that translator and their meaning.
-
-@node Storage Translators
-@section Storage Translators
-
-The storage translators form the ``backend'' for GlusterFS. Currently,
-the only available storage translator is the @acronym{POSIX}
-translator, which stores files on a normal @acronym{POSIX}
-filesystem. A pleasant consequence of this is that your data will
-still be accessible if GlusterFS crashes or cannot be started.
-
-Other storage backends are planned for the future. One of the possibilities is an
-Amazon S3 translator. Amazon S3 is an unlimited online storage service accessible
-through a web services @acronym{API}. The S3 translator will allow you to access
-the storage as a normal @acronym{POSIX} filesystem.
-@footnote{Some more discussion about this can be found at:
-
-http://developer.amazonwebservices.com/connect/message.jspa?messageID=52873}
-
-@menu
-* POSIX::
-* BDB::
-@end menu
-
-@node POSIX
-@subsection POSIX
-@example
-type storage/posix
-@end example
-
-The @command{posix} translator uses a normal @acronym{POSIX}
-filesystem as its ``backend'' to actually store files and
-directories. This can be any filesystem that supports extended
-attributes (@acronym{EXT3}, ReiserFS, @acronym{XFS}, ...). Extended
-attributes are used by some translators to store metadata, for
-example, by the replicate and stripe translators. See
-@ref{Replicate} and @ref{Stripe}, respectively for details.
-
-@cartouche
-@table @code
-@item directory <path>
-The directory on the local filesystem which is to be used for storage.
-@end table
-@end cartouche
-
-@node BDB
-@subsection BDB
-@example
-type storage/bdb
-@end example
-
-The @command{BDB} translator uses a @acronym{Berkeley DB} database as its
-``backend'' to actually store files as key-value pair in the database and
-directories as regular @acronym{POSIX} directories. Note that @acronym{BDB}
-does not provide extended attribute support for regular files. Do not use
-@acronym{BDB} as storage translator while using any translator that demands
-extended attributes on ``backend''.
-
-@cartouche
-@table @code
-@item directory <path>
-The directory on the local filesystem which is to be used for storage.
-@item mode [cache|persistent] (cache)
-When @acronym{BDB} is run in @command{cache} mode, recovery of the back-end is not completely
-guaranteed. @command{persistent} mode guarantees that the back-end can be recovered from
-@acronym{Berkeley DB} even if GlusterFS crashes.
-@item errfile <path>
-The path of the file to be used as @command{errfile} for @acronym{Berkeley DB} to report
-detailed error messages, if any. Note that all the contents of this file will be written
-by @acronym{Berkeley DB}, not GlusterFS.
-@item logdir <path>
-The directory to be used by @acronym{Berkeley DB} to store its log files.
-@end table
-@end cartouche
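-
-A minimal @acronym{BDB} volume definition might look like this (the directory
-path is only an example):
-
-@example
-volume bdb-store
- type storage/bdb
- option directory /export/bdb
- option mode persistent
-end-volume
-@end example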
-
-@node Client and Server Translators, Clustering Translators, Storage Translators, Translators
-@section Client and Server Translators
-
-The client and server translators enable GlusterFS to export a
-translator tree over the network or access a remote GlusterFS
-server. These two translators implement GlusterFS's network protocol.
-
-@menu
-* Transport modules::
-* Client protocol::
-* Server protocol::
-@end menu
-
-@node Transport modules
-@subsection Transport modules
-The client and server translators are capable of using any of the
-pluggable transport modules. Currently available transport modules are
-@command{tcp}, which uses a @acronym{TCP} connection between client
-and server to communicate; @command{ib-sdp}, which uses a
-@acronym{TCP} connection over InfiniBand, and @command{ib-verbs}, which
-uses high-speed InfiniBand connections.
-
-Each transport module comes in two different versions, one to be used on
-the server side and the other on the client side.
-
-@subsubsection TCP
-
-The @acronym{TCP} transport module uses a @acronym{TCP/IP} connection between
-the server and the client.
-
-@example
- option transport-type tcp
-@end example
-
-The @acronym{TCP} client module accepts the following options:
-
-@cartouche
-@table @code
-@item non-blocking-connect [no|off|on|yes] (on)
-Whether to make the connection attempt asynchronous.
-@item remote-port <n> (24007)
-Server port to connect to.
-@cindex DNS round robin
-@item remote-host <hostname> *
-Hostname or @acronym{IP} address of the server. If the host name resolves to
-multiple IP addresses, all of them will be tried in a round-robin fashion. This
-feature can be used to implement fail-over.
-@end table
-@end cartouche
-
-The @acronym{TCP} server module accepts the following options:
-
-@cartouche
-@table @code
-@item bind-address <address> (0.0.0.0)
-The local interface on which the server should listen to requests. Default is to
-listen on all interfaces.
-@item listen-port <n> (24007)
-The local port to listen on.
-@end table
-@end cartouche
-
-@subsubsection IB-SDP
-@example
- option transport-type ib-sdp
-@end example
-
-The kernel implements a socket interface for InfiniBand hardware; @acronym{SDP}
-runs over ib-verbs. This module accepts the same options as @command{tcp}.
-
-@subsubsection ibverbs
-
-@example
- option transport-type ib-verbs
-@end example
-
-@cindex infiniband transport
-
-InfiniBand is a scalable switched fabric interconnect mechanism
-primarily used in high-performance computing. InfiniBand can deliver
-data throughput on the order of 10 Gbit/s, with latencies of 4-5 microseconds.
-
-The @command{ib-verbs} transport accesses the InfiniBand hardware through
-the ``verbs'' @acronym{API}, which is the lowest level of software access possible
-and which gives the highest performance. On InfiniBand hardware, it is always
-best to use @command{ib-verbs}. Use @command{ib-sdp} only if you cannot get
-@command{ib-verbs} working for some reason.
-
-The @command{ib-verbs} client module accepts the following options:
-
-@cartouche
-@table @code
-@item non-blocking-connect [no|off|on|yes] (on)
-Whether to make the connection attempt asynchronous.
-@item remote-port <n> (24007)
-Server port to connect to.
-@cindex DNS round robin
-@item remote-host <hostname> *
-Hostname or @acronym{IP} address of the server. If the host name resolves to
-multiple IP addresses, all of them will be tried in a round-robin fashion. This
-feature can be used to implement fail-over.
-@end table
-@end cartouche
-
-The @command{ib-verbs} server module accepts the following options:
-
-@cartouche
-@table @code
-@item bind-address <address> (0.0.0.0)
-The local interface on which the server should listen to requests. Default is to
-listen on all interfaces.
-@item listen-port <n> (24007)
-The local port to listen on.
-@end table
-@end cartouche
-
-The following options are common to both the client and server modules:
-
-If you are familiar with InfiniBand jargon,
-the mode used by GlusterFS is ``reliable connection-oriented channel transfer''.
-
-@cartouche
-@table @code
-@item ib-verbs-work-request-send-count <n> (64)
-Length of the send queue in datagrams. [Reason to increase/decrease?]
-
-@item ib-verbs-work-request-recv-count <n> (64)
-Length of the receive queue in datagrams. [Reason to increase/decrease?]
-
-@item ib-verbs-work-request-send-size <size> (128KB)
-Size of each datagram that is sent. [Reason to increase/decrease?]
-
-@item ib-verbs-work-request-recv-size <size> (128KB)
-Size of each datagram that is received. [Reason to increase/decrease?]
-
-@item ib-verbs-port <n> (1)
-Port number for ib-verbs.
-
-@item ib-verbs-mtu [256|512|1024|2048|4096] (2048)
-The Maximum Transmission Unit [Reason to increase/decrease?]
-
-@item ib-verbs-device-name <device-name> (first device in the list)
-InfiniBand device to be used.
-@end table
-@end cartouche
-
-For maximum performance, you should ensure that the send/receive counts on both
-the client and server are the same.
-
-ib-verbs is preferred over ib-sdp.
-
-@node Client protocol
-@subsection Client
-@example
-type protocol/client
-@end example
-
-The client translator enables the GlusterFS client to access a remote server's
-translator tree.
-
-@cartouche
-@table @code
-
-@item transport-type [tcp,ib-sdp,ib-verbs] (tcp)
-The transport type to use. You should use the client versions of all the
-transport modules (@command{tcp}, @command{ib-sdp},
-@command{ib-verbs}).
-@item remote-subvolume <volume_name> *
-The name of the volume on the remote host to attach to. Note that
-this is @emph{not} the name of the @command{protocol/server} volume on the
-server. It should be any volume under the server.
-@item transport-timeout <n> (120 seconds)
-Inactivity timeout. If a reply is expected and no activity takes place
-on the connection within this time, the transport connection will be
-broken, and a new connection will be attempted.
-@end table
-@end cartouche
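-
-Putting these options together, a client volume definition might look like the
-following sketch (the host name and volume names are placeholders):
-
-@example
-volume remote-brick
- type protocol/client
- option transport-type tcp
- option remote-host server.example.com
- option remote-subvolume brick
- option transport-timeout 120
-end-volume
-@end example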
-
-@node Server protocol
-@subsection Server
-@example
-type protocol/server
-@end example
-
-The server translator exports a translator tree and makes it accessible to
-remote GlusterFS clients.
-
-@cartouche
-@table @code
-@item client-volume-filename <path> (<CONFDIR>/glusterfs-client.vol)
-The volume specification file to use for the client. This is the file the
-client will receive when it is invoked with the @command{--volfile-server} option
-(@ref{Client}).
-
-@item transport-type [tcp,ib-verbs,ib-sdp] (tcp)
-The transport to use. You should use the server versions of all the transport
-modules (@command{tcp}, @command{ib-sdp}, @command{ib-verbs}).
-
-@item auth.addr.<volume name>.allow <IP address wildcard pattern>
-IP addresses of the clients that are allowed to attach to the specified volume.
-This can be a wildcard. For example, a wildcard of the form @command{192.168.*.*}
-allows any host in the @command{192.168.x.x} subnet to connect to the server.
-
-@end table
-@end cartouche
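-
-For example, a server volume that exports a brick and restricts access to a
-single subnet could be written as follows (addresses and names are
-illustrative):
-
-@example
-volume server
- type protocol/server
- option transport-type tcp
- subvolumes brick
- option auth.addr.brick.allow 192.168.1.*
-end-volume
-@end example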
-
-@node Clustering Translators
-@section Clustering Translators
-
-The clustering translators are the most important GlusterFS
-translators, since it is these that make GlusterFS a cluster
-filesystem. These translators together enable GlusterFS to access an
-arbitrarily large amount of storage, and provide @acronym{RAID}-like
-redundancy and distribution over the entire cluster.
-
-There are three clustering translators: @strong{unify}, @strong{replicate},
-and @strong{stripe}. The unify translator aggregates storage from
-many server nodes. The replicate translator provides file replication. The stripe
-translator allows a file to be spread across many server nodes. The following sections
-look at each of these translators in detail.
-
-@menu
-* Unify::
-* Replicate::
-* Stripe::
-@end menu
-
-@node Unify
-@subsection Unify
-@cindex unify (translator)
-@cindex scheduler (unify)
-@example
-type cluster/unify
-@end example
-
-The unify translator presents a `unified' view of all its sub-volumes. That is,
-it makes the union of all its sub-volumes appear as a single volume. It is the
-unify translator that gives GlusterFS the ability to access an arbitrarily
-large amount of storage.
-
-For unify to work correctly, certain invariants need to be maintained across
-the entire network. These are:
-
-@cindex unify invariants
-@itemize
-@item The directory structure of all the sub-volumes must be identical.
-@item A particular file can exist on only one of the sub-volumes. Phrasing it in another way, a pathname such as @command{/home/calvin/homework.txt} is unique across the entire cluster.
-@end itemize
-
-@tex
-\vfill
-@end tex
-@page
-
-@center @image{unify,44pc,,,.pdf}
-
-Looking at the second requirement, you might wonder how one can
-accomplish storing redundant copies of a file, if no file can exist
-multiple times. To answer, we must remember that these invariants are
-from @emph{unify's perspective}. A translator such as replicate at a lower
-level in the translator tree than unify may subvert this picture.
-
-The first invariant might seem quite tedious to ensure. We shall see
-later that this is not so, since unify's @emph{self-heal} mechanism
-takes care of maintaining it.
-
-The second invariant implies that unify needs some way to decide which file goes where.
-Unify makes use of @emph{scheduler} modules for this purpose.
-
-When a file needs to be created, unify's scheduler decides upon the
-sub-volume to be used to store the file. There are many schedulers
-available, each using a different algorithm and suitable for different
-purposes.
-
-The various schedulers are described in detail in the sections that follow.
-
-@subsubsection ALU
-@cindex alu (scheduler)
-
-@example
- option scheduler alu
-@end example
-
-ALU stands for "Adaptive Least Usage". It is the most advanced
-scheduler available in GlusterFS. It balances the load across volumes
-taking several factors into account. It adapts itself to changing I/O
-patterns according to its configuration. When properly configured, it
-can eliminate the need for regular tuning of the filesystem to keep
-volume load nicely balanced.
-
-The ALU scheduler is composed of multiple least-usage
-sub-schedulers. Each sub-scheduler keeps track of a certain type of
-load, for each of the sub-volumes, getting statistics from
-the sub-volumes themselves. The sub-schedulers are these:
-
-@itemize
-@item disk-usage: The used and free disk space on the volume.
-
-@item read-usage: The amount of reading done from this volume.
-
-@item write-usage: The amount of writing done to this volume.
-
-@item open-files-usage: The number of files currently open from this volume.
-
-@item disk-speed-usage: The speed at which the disks are spinning. This is a constant value and therefore not very useful.
-@end itemize
-
-The ALU scheduler needs to know which of these sub-schedulers to use,
-and in which order to evaluate them. This is done through the
-@command{option alu.order} configuration directive.
-
-Each sub-scheduler needs to know two things: when to kick in (the
-entry-threshold), and how long to stay in control (the
-exit-threshold). For example: when unifying three disks of 100GB,
-keeping an exact balance of disk-usage is not necessary. Instead, there
-could be a 1GB margin, which can be used to nicely balance other
-factors, such as read-usage. The disk-usage scheduler can be told to
-kick in only when a certain threshold of discrepancy is passed, such
-as 1GB. When it assumes control under this condition, it will write
-all subsequent data to the least-used volume. If it is doing so, it is
-unwise to stop right after the values are below the entry-threshold
-again, since that would make it very likely that the situation will
-occur again very soon. Such a situation would cause the ALU to spend
-most of its time disk-usage scheduling, which is unfair to the other
-sub-schedulers. The exit-threshold therefore defines the amount of
-data that needs to be written to the least-used disk, before control
-is relinquished again.
-
-In addition to the sub-schedulers, the ALU scheduler also has "limits"
-options. These can stop the creation of new files on a volume once
-values drop below a certain threshold. For example, setting
-@command{option alu.limits.min-free-disk 5GB} will stop the scheduling
-of files to volumes that have less than 5GB of free disk space,
-leaving the files on that disk some room to grow.
-
-The actual values you assign to the thresholds for sub-schedulers and
-limits depend on your situation. If you have fast-growing files,
-you'll want to stop file-creation on a disk much earlier than when
-hardly any of your files are growing. If you care less about
-disk-usage balance than about read-usage balance, you'll want a bigger
-disk-usage scheduler entry-threshold and a smaller read-usage
-scheduler entry-threshold.
-
-For thresholds defining a size, values specifying "KB", "MB" and "GB"
-are allowed. For example: @command{option alu.limits.min-free-disk 5GB}.
-
-@cartouche
-@table @code
-@item alu.order <order> * ("disk-usage:write-usage:read-usage:open-files-usage:disk-speed")
-@item alu.disk-usage.entry-threshold <size> (1GB)
-@item alu.disk-usage.exit-threshold <size> (512MB)
-@item alu.write-usage.entry-threshold <%> (25)
-@item alu.write-usage.exit-threshold <%> (5)
-@item alu.read-usage.entry-threshold <%> (25)
-@item alu.read-usage.exit-threshold <%> (5)
-@item alu.open-files-usage.entry-threshold <n> (1000)
-@item alu.open-files-usage.exit-threshold <n> (100)
-@item alu.limits.min-free-disk <%>
-@item alu.limits.max-open-files <n>
-@end table
-@end cartouche
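-
-A unify volume using the ALU scheduler might be configured as in the following
-sketch (volume names and thresholds are illustrative; the mandatory
-@command{namespace} option is described later in this section):
-
-@example
-volume unify0
- type cluster/unify
- subvolumes brick1 brick2 brick3
- option namespace brick-ns
- option scheduler alu
- option alu.order disk-usage:read-usage:open-files-usage
- option alu.disk-usage.entry-threshold 2GB
- option alu.disk-usage.exit-threshold 512MB
- option alu.limits.min-free-disk 5GB
-end-volume
-@end example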
-
-@subsubsection Round Robin (RR)
-@cindex rr (scheduler)
-
-@example
- option scheduler rr
-@end example
-
-The Round-Robin (RR) scheduler creates files in a round-robin
-fashion. Each client will have its own round-robin loop. When your
-files are mostly similar in size and I/O access pattern, this
-scheduler is a good choice. The RR scheduler checks for free disk space
-on the server before scheduling, so you can tell when to add
-another server node. The default value of min-free-disk is 5% and is
-checked on file creation calls, with at least 10 seconds (by default)
-elapsing between two checks.
-
-Options:
-@cartouche
-@table @code
-@item rr.limits.min-free-disk <%> (5)
-Minimum free disk space a node must have for RR to schedule a file to it.
-@item rr.refresh-interval <t> (10 seconds)
-Time between two successive free disk space checks.
-@end table
-@end cartouche
-
-@subsubsection Random
-@cindex random (scheduler)
-
-@example
- option scheduler random
-@end example
-
-The random scheduler schedules file creation randomly among its child nodes.
-Like the round-robin scheduler, it also checks for a minimum amount of free disk
-space before scheduling a file to a node.
-
-@cartouche
-@table @code
-@item random.limits.min-free-disk <%> (5)
-Minimum free disk space a node must have for random to schedule a file to it.
-@item random.refresh-interval <t> (10 seconds)
-Time between two successive free disk space checks.
-@end table
-@end cartouche
-
-@subsubsection NUFA
-@cindex nufa (scheduler)
-
-@example
- option scheduler nufa
-@end example
-
-It is common in many GlusterFS computing environments for all deployed
-machines to act as both servers and clients. For example, a
-research lab may have 40 workstations each with its own storage. All
-of these workstations might act as servers exporting a volume as well
-as clients accessing the entire cluster's storage. In such a
-situation, it makes sense to store locally created files on the local
-workstation itself (assuming files are accessed most by the
-workstation that created them). The Non-Uniform File Allocation (@acronym{NUFA})
-scheduler accomplishes that.
-
-@acronym{NUFA} gives the local system first priority for file creation
-over other nodes. If the local volume does not have more free disk space
-than a specified amount (5% by default) then @acronym{NUFA} schedules files
-among the other child volumes in a round-robin fashion.
-
-@acronym{NUFA} is named after the similar strategy used for memory access,
-@acronym{NUMA}@footnote{Non-Uniform Memory Access:
-@indicateurl{http://en.wikipedia.org/wiki/Non-Uniform_Memory_Access}}.
-
-@cartouche
-@table @code
-@item nufa.limits.min-free-disk <%> (5)
-Minimum disk space that must be free (local or remote) for @acronym{NUFA} to schedule a
-file to it.
-@item nufa.refresh-interval <t> (10 seconds)
-Time between two successive free disk space checks.
-@item nufa.local-volume-name <volume>
-The name of the volume corresponding to the local system. This volume must be
-one of the children of the unify volume. This option is mandatory.
-@end table
-@end cartouche
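-
-For instance, on a workstation whose local export is defined as the volume
-@command{brick-local}, a @acronym{NUFA}-scheduled unify volume might look like
-this (all names are illustrative):
-
-@example
-volume unify0
- type cluster/unify
- subvolumes brick-local brick2 brick3
- option namespace brick-ns
- option scheduler nufa
- option nufa.local-volume-name brick-local
-end-volume
-@end example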
-
-@cindex namespace
-@subsubsection Namespace
-A namespace volume is needed because:
-@itemize
-@item it provides persistent inode numbers, and
-@item a file's entry continues to exist even when the node storing the file is down.
-@end itemize
-
-Files in the namespace are simply touched (created empty); the namespace entry
-is checked on every lookup.
-
-@cartouche
-@table @code
-@item namespace <volume> *
-Name of the namespace volume (which should be one of the unify volume's children).
-@item self-heal [on|off] (on)
-Enable/disable self-heal. Unless you know what you are doing, do not disable self-heal.
-@end table
-@end cartouche
-
-@cindex self heal (unify)
-@subsubsection Self Heal
- * When a 'lookup()/stat()' call is made on a directory for the first
-time, a self-heal call is made, which checks the consistency of
-its child nodes. If an entry is present on a storage node but not in the
-namespace, that entry is created in the namespace, and vice versa. A
-writedir() API was introduced for this purpose. It also
-checks for permission and uid/gid consistency.
-
- * This check is also done when a server goes down and comes back up.
-
- * If one starts with an empty namespace export but has data on the
-storage nodes, a 'find . >/dev/null' or 'ls -lR >/dev/null' helps
-to build the namespace in one shot. Even otherwise, the namespace is built on
-demand when a file is looked up for the first time.
-
-NOTE: Some issues (kernel 'Oops' messages) have been seen with fuse-2.6.3
-when the namespace is deleted on the backend while glusterfs is
-running. With fuse-2.6.5, this issue is not present.
-
-@node Replicate
-@subsection Replicate (formerly AFR)
-@cindex Replicate
-@example
-type cluster/replicate
-@end example
-
-Replicate provides @acronym{RAID}-1 like functionality for
-GlusterFS. Replicate replicates files and directories across the
-subvolumes. Hence if Replicate has four subvolumes, there will be
-four copies of all files and directories. Replicate provides
-high availability: if one of the subvolumes goes down
-(e.g., due to a server crash or network disconnection), Replicate will still
-service requests using the redundant copies.
-
-Replicate also provides self-heal functionality, i.e., in case the
-crashed servers come up, the outdated files and directories will be
-updated with the latest versions. Replicate uses extended
-attributes of the backend file system to track the versioning of files
-and directories and provide the self-heal feature.
-
-@example
-volume replicate-example
- type cluster/replicate
- subvolumes brick1 brick2 brick3
-end-volume
-@end example
-
-This sample configuration will replicate all directories and files on
-brick1, brick2 and brick3.
-
-All read operations happen from the first alive child. If all
-three sub-volumes are up, reads will be done from brick1; if brick1 is
-down, reads will be done from brick2. If a read() is in progress on
-brick1 when it goes down, replicate transparently falls back to
-brick2.
-
-The next release of GlusterFS will add the following features:
-@itemize
-@item Ability to specify the sub-volume from which read operations are to be done (this will help users who have one of the sub-volumes as a local storage volume).
-@item Allow scheduling of read operations amongst the sub-volumes in a round-robin fashion.
-@end itemize
-
-The order of the subvolumes list should be the same across all replicate
-volumes, as it is used for locking purposes.
-
-@cindex self heal (replicate)
-@subsubsection Self Heal
-Replicate has a self-heal feature, which updates outdated file and
-directory copies with the most recent versions. For example, consider the
-following configuration:
-
-@example
-volume replicate-example
- type cluster/replicate
- subvolumes brick1 brick2
-end-volume
-@end example
-
-@subsubsection File self-heal
-
-Now if we create a file foo.txt on replicate-example, the file will be created
-on brick1 and brick2. The file will have two extended attributes associated
-with it in the backend filesystem. One is trusted.afr.createtime and the
-other is trusted.afr.version. The trusted.afr.createtime xattr has the
-create time (in terms of seconds since epoch) and trusted.afr.version
-is a number that is incremented each time the file is modified. This increment
-happens during close() (in case any write was done before the close).
-
-Suppose brick1 goes down and we edit foo.txt; the version gets incremented.
-When brick1 comes back up and we open() foo.txt, replicate checks whether
-the versions on the copies are the same. If they are not, the outdated copy is
-replaced by the latest copy and its version is updated. After the sync,
-the open() proceeds in the usual manner and the application calling open()
-can continue accessing the file.
-
-Now suppose brick1 goes down, we delete foo.txt, and then create a file with
-the same name again. When brick1 comes back up, the version on brick1 may
-well be higher than the version on brick2; this is where the createtime
-extended attribute helps in deciding which copy is outdated. Hence both
-createtime and version need to be considered to decide on the latest copy.
-
-The version attribute is incremented during the close() call. The version
-will not be incremented if no write() was done. If the fd passed to close()
-was obtained from a create() call, the createtime extended attribute is also
-created.
-
-@subsubsection Directory self-heal
-
-Suppose brick1 goes down, we delete foo.txt, and then brick1 comes back up.
-Self-heal should not re-create foo.txt on brick2; instead it should delete
-foo.txt on brick1. This situation is handled by keeping createtime and version
-attributes on the directory, just as for files. When lookup() is done
-on the directory, the createtime/version attributes of the copies are compared
-to determine which files need to be deleted; those files are deleted and the
-extended attributes of the outdated directory copy are updated.
-Each time a directory is modified (a file or a subdirectory is created
-or deleted inside the directory) and one of the subvols is down, we
-increment the directory's version.
-
-lookup() is a call initiated by the kernel on a file or directory
-just before any access to that file or directory. In glusterfs, by
-default, lookup() will not be called if it was already called within the
-past second on that particular file or directory.
-
-The extended attributes can be seen in the backend filesystem using
-the @command{getfattr} command. (@command{getfattr -n trusted.afr.version <file>})
-
-@cartouche
-@table @code
-@item debug [on|off] (off)
-@item self-heal [on|off] (on)
-@item replicate <pattern> (*:1)
-@item lock-node <child_volume> (first child is used by default)
-@end table
-@end cartouche
-
-@node Stripe
-@subsection Stripe
-@cindex stripe (translator)
-@example
-type cluster/stripe
-@end example
-
-The stripe translator distributes the contents of a file over its
-sub-volumes. It does this by creating a file equal in size to the
-total size of the file on each of its sub-volumes. It then writes only
-a part of the file to each sub-volume, leaving the rest of it empty.
-These empty regions are called `holes' in Unix terminology. The holes
-do not consume any disk space.
-
-The diagram below makes this clear.
-
-@center @image{stripe,44pc,,,.pdf}
-
-You can configure stripe so that only filenames matching a pattern
-are striped. You can also configure the size of the data to be stored
-on each sub-volume.
-
-@cartouche
-@table @code
-@item block-size <pattern>:<size> (*:0 no striping)
-Distribute files matching @command{<pattern>} over the sub-volumes,
-storing at least @command{<size>} on each sub-volume. For example,
-
-@example
- option block-size *.mpg:1M
-@end example
-
-distributes all files ending in @command{.mpg}, storing at least 1 MB on
-each sub-volume.
-
-Any number of @command{block-size} option lines may be present, specifying
-different sizes for different file name patterns.
-@end table
-@end cartouche
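-
-For example, a stripe volume that stripes large video files across four
-sub-volumes might be defined as follows (names and pattern are illustrative):
-
-@example
-volume stripe0
- type cluster/stripe
- subvolumes brick1 brick2 brick3 brick4
- option block-size *.mpg:1M
-end-volume
-@end example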
-
-@node Performance Translators
-@section Performance Translators
-
-@menu
-* Read Ahead::
-* Write Behind::
-* IO Threads::
-* IO Cache::
-* Booster::
-@end menu
-
-@node Read Ahead
-@subsection Read Ahead
-@cindex read-ahead (translator)
-@example
-type performance/read-ahead
-@end example
-
-The read-ahead translator pre-fetches data in advance on every read.
-This benefits applications that mostly process files in sequential order,
-since the next block of data will already be available by the time the
-application is done with the current one.
-
-Additionally, the read-ahead translator also behaves as a read-aggregator.
-Many small read operations are combined and issued as fewer, larger read
-requests to the server.
-
-Read-ahead deals in ``pages'' as the unit of data fetched. The page size
-is configurable, as is the ``page count'', which is the number of pages
-that are pre-fetched.
-
-Read-ahead is best used with InfiniBand (using the ib-verbs transport).
-On FastEthernet and Gigabit Ethernet networks,
-GlusterFS can achieve the link-maximum throughput even without
-read-ahead, making it quite superfluous.
-
-Note that read-ahead only happens if the reads are perfectly
-sequential. If your application accesses data in a random fashion,
-using read-ahead might actually lead to a performance loss, since
-read-ahead will pointlessly fetch pages which won't be used by the
-application.
-
-@cartouche
-Options:
-@table @code
-@item page-size <n> (256KB)
-The unit of data that is pre-fetched.
-@item page-count <n> (2)
-The number of pages that are pre-fetched.
-@item force-atime-update [on|off|yes|no] (off|no)
-Whether to force an access time (atime) update on the file on every read. Without
-this, the atime will be slightly imprecise, as it will reflect the time when
-the read-ahead translator read the data, not when the application actually read it.
-@end table
-@end cartouche
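-
-A client-side read-ahead volume layered on top of a @command{client} volume
-might look like this sketch (values are illustrative):
-
-@example
-volume readahead
- type performance/read-ahead
- subvolumes client
- option page-size 256KB
- option page-count 4
-end-volume
-@end example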
-
-@node Write Behind
-@subsection Write Behind
-@cindex write-behind (translator)
-@example
-type performance/write-behind
-@end example
-
-The write-behind translator improves the latency of a write operation.
-It does this by relegating the write operation to the background and
-returning to the application even as the write is in progress. Using the
-write-behind translator, successive write requests can be pipelined.
-This mode of write-behind operation is best used on the client side, to
-enable decreased write latency for the application.
-
-The write-behind translator can also aggregate write requests. If the
-@command{aggregate-size} option is specified, then successive writes up to that
-size are accumulated and written in a single operation. This mode of operation
-is best used on the server side, as this will decrease the disk's head movement
-when multiple files are being written to in parallel.
-
-The @command{aggregate-size} option has a default value of 128KB. Although
-this works well for most users, you should always experiment with different values
-to determine the one that will deliver maximum performance. This is because the
-performance of write-behind depends on your interconnect, the amount of RAM, and
-the workload.
-
-@cartouche
-@table @code
-@item aggregate-size <n> (128KB)
-Amount of data to accumulate before doing a write.
-@item flush-behind [on|yes|off|no] (off|no)
-Whether to perform the flush operation in the background, returning to the
-application without waiting for the pending data to reach the server.
-@end table
-@end cartouche
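-
-A hypothetical write-behind volume for the client side, assuming a subvolume
-named @command{client}; the option values are illustrative only:
-
-@example
-# sketch: write-behind with aggregation and background flush
-volume writebehind
-  type performance/write-behind
-  option aggregate-size 1MB   # accumulate up to 1 MB before writing
-  option flush-behind on      # perform flush in the background
-  subvolumes client
-end-volume
-@end example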
-
-@node IO Threads
-@subsection IO Threads
-@cindex io-threads (translator)
-@example
-type performance/io-threads
-@end example
-
-The IO threads translator is intended to increase the responsiveness
-of the server to metadata operations by doing file I/O (read, write)
-in a background thread. Since the GlusterFS server is
-single-threaded, using the IO threads translator can significantly
-improve performance. This translator is best used on the server side,
-loaded just below the server protocol translator.
-
-The IO threads translator operates by handing read and write requests over to separate background threads.
-The total number of threads in existence at a time is constant, and configurable.
-
-@cartouche
-@table @code
-@item thread-count <n> (1)
-Number of threads to use.
-@end table
-@end cartouche
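-
-For example, a sketch of loading IO threads on the server side, just below the
-protocol/server translator; the subvolume name @command{brick} is assumed:
-
-@example
-# sketch: io-threads on the server side
-volume iothreads
-  type performance/io-threads
-  option thread-count 4    # four background I/O threads
-  subvolumes brick
-end-volume
-@end example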
-
-@node IO Cache
-@subsection IO Cache
-@cindex io-cache (translator)
-@example
-type performance/io-cache
-@end example
-
-The IO cache translator caches data that has been read. This is useful
-if many applications read the same data multiple times, and if reads
-are much more frequent than writes (for example, IO caching may be
-useful in a web hosting environment, where most clients will simply
-read some files and only a few will write to them).
-
-The IO cache translator reads data from its child in @command{page-size} chunks.
-It caches data up to @command{cache-size} bytes. The cache is maintained as
-a prioritized least-recently-used (@acronym{LRU}) list, with priorities determined
-by user-specified patterns to match filenames.
-
-When the IO cache translator detects a write operation, the
-cache for that file is flushed.
-
-The IO cache translator periodically verifies the consistency of
-cached data, using the modification times on the files. The verification timeout
-is configurable.
-
-@cartouche
-@table @code
-@item page-size <n> (128KB)
-Size of a page.
-@item cache-size <n> (32MB)
-Total amount of data to be cached.
-@item force-revalidate-timeout <n> (1)
-Timeout to force a cache consistency verification, in seconds.
-@item priority <pattern> (*:0)
-Filename patterns listed in order of priority.
-@end table
-@end cartouche
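-
-An illustrative IO cache volume; the subvolume name @command{readahead} and the
-priority pattern syntax are assumptions, following the option descriptions above:
-
-@example
-# sketch: io-cache with a larger cache and prioritized HTML files
-volume iocache
-  type performance/io-cache
-  option cache-size 64MB
-  option priority *.html:2,*:1   # cache HTML files at higher priority
-  subvolumes readahead
-end-volume
-@end example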
-
-@node Booster
-@subsection Booster
-@cindex booster
-@example
- type performance/booster
-@end example
-
-The booster translator gives applications a faster path to communicate
-read and write requests to GlusterFS. Normally, all requests to GlusterFS from
-applications go through FUSE, as indicated in @ref{Filesystems in Userspace}.
-Using the booster translator in conjunction with the GlusterFS booster shared
-library, an application can bypass the FUSE path and send read/write requests
-directly to the GlusterFS client process.
-
-The booster mechanism consists of two parts: the booster translator,
-and the booster shared library. The booster translator is meant to be
-loaded on the client side, usually at the root of the translator tree.
-The booster shared library should be @command{LD_PRELOAD}ed with the
-application.
-
-When loaded, the booster translator opens a Unix domain socket and
-listens for read/write requests on it. The booster shared library
-intercepts read and write system calls and sends the requests to the
-GlusterFS process directly using the Unix domain socket, bypassing FUSE.
-This leads to superior performance.
-
-Once you've loaded the booster translator in your volume specification file, you
-can start your application as:
-
-@example
- $ LD_PRELOAD=/usr/local/bin/glusterfs-booster.so your_app
-@end example
-
-The booster translator accepts no options.
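-
-As noted above, the translator is usually loaded at the root of the client
-translator tree. A minimal sketch, assuming the rest of the tree ends in a
-volume named @command{iocache}:
-
-@example
-# sketch: booster at the top of the client graph
-volume booster
-  type performance/booster
-  subvolumes iocache
-end-volume
-@end example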
-
-@node Features Translators
-@section Features Translators
-
-@menu
-* POSIX Locks::
-* Fixed ID::
-@end menu
-
-@node POSIX Locks
-@subsection POSIX Locks
-@cindex record locking
-@cindex fcntl
-@cindex posix-locks (translator)
-@example
-type features/posix-locks
-@end example
-
-This translator provides storage independent POSIX record locking
-support (@command{fcntl} locking). Typically you'll want to load this on the
-server side, just above the @acronym{POSIX} storage translator. Using this
-translator you can get both advisory locking and mandatory locking
-support. It also handles @command{flock()} locks properly.
-
-Caveat: Consider a file that does not have its mandatory locking bits
-(+setgid, -group execution) turned on. Assume that this file is now
-opened by a process on a client that has the write-behind xlator
-loaded. The write-behind xlator does not cache anything for files
-which have mandatory locking enabled, to avoid incoherence. Let's say
-that mandatory locking is now enabled on this file through another
-client. The former client will not know about this change, and
-write-behind may erroneously report a write as being successful when
-in fact it would fail due to the region it is writing to being locked.
-
-There seems to be no easy way to fix this. To work around this
-problem, it is recommended that you never enable the mandatory bits on
-a file while it is open.
-
-@cartouche
-@table @code
-@item mandatory [on|off] (on)
-Turns mandatory locking on.
-@end table
-@end cartouche
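-
-A server-side sketch, loading posix-locks just above the @acronym{POSIX}
-storage translator; the volume names and the export directory are assumptions:
-
-@example
-volume brick
-  type storage/posix
-  option directory /export/brick1
-end-volume
-
-volume locks
-  type features/posix-locks
-  option mandatory on
-  subvolumes brick
-end-volume
-@end example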
-
-@node Fixed ID
-@subsection Fixed ID
-@cindex fixed-id (translator)
-@example
-type features/fixed-id
-@end example
-
-The fixed ID translator makes all filesystem requests from the client
-appear to come from a fixed, specified
-@acronym{UID}/@acronym{GID}, regardless of which user actually
-initiated the request.
-
-@cartouche
-@table @code
-@item fixed-uid <n> [if not set, not used]
-The @acronym{UID} to send to the server
-@item fixed-gid <n> [if not set, not used]
-The @acronym{GID} to send to the server
-@end table
-@end cartouche
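-
-An illustrative fixed-id volume that makes all requests appear to come from
-@acronym{UID}/@acronym{GID} 1000; the subvolume name @command{client} is assumed:
-
-@example
-volume fixedid
-  type features/fixed-id
-  option fixed-uid 1000
-  option fixed-gid 1000
-  subvolumes client
-end-volume
-@end example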
-
-@node Miscellaneous Translators
-@section Miscellaneous Translators
-
-@menu
-* ROT-13::
-* Trace::
-@end menu
-
-@node ROT-13
-@subsection ROT-13
-@cindex rot-13 (translator)
-@example
-type encryption/rot-13
-@end example
-
-@acronym{ROT-13} is a toy translator that can ``encrypt'' and ``decrypt'' file
-contents using the @acronym{ROT-13} algorithm. @acronym{ROT-13} is a trivial
-algorithm that rotates each letter of the alphabet by thirteen places. Thus, 'A' becomes 'N',
-'B' becomes 'O', and 'Z' becomes 'M'.
-
-It goes without saying that you shouldn't use this translator if you need
-@emph{real} encryption (a future release of GlusterFS will have real encryption
-translators).
-
-@cartouche
-@table @code
-@item encrypt-write [on|off] (on)
-Whether to encrypt on write
-@item decrypt-read [on|off] (on)
-Whether to decrypt on read
-@end table
-@end cartouche
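-
-A toy example with both directions enabled explicitly; the subvolume name
-@command{brick} is assumed:
-
-@example
-volume rot13
-  type encryption/rot-13
-  option encrypt-write on
-  option decrypt-read on
-  subvolumes brick
-end-volume
-@end example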
-
-@node Trace
-@subsection Trace
-@cindex trace (translator)
-@example
-type debug/trace
-@end example
-
-The trace translator is intended for debugging purposes. When loaded, it
-logs all the system calls received by the server or client (wherever
-trace is loaded), their arguments, and the results. You must use a GlusterFS log
-level of DEBUG (See @ref{Running GlusterFS}) for trace to work.
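-
-As an illustration, trace can be loaded at the top of a client specification
-file like this; the subvolume name @command{client} is an assumption:
-
-@example
-volume trace
-  type debug/trace
-  subvolumes client
-end-volume
-@end example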
-
-Sample trace output (lines have been wrapped for readability):
-@cartouche
-@example
-2007-10-30 00:08:58 D [trace.c:1579:trace_opendir] trace: callid: 68
-(*this=0x8059e40, loc=0x8091984 @{path=/iozone3_283, inode=0x8091f00@},
- fd=0x8091d50)
-
-2007-10-30 00:08:58 D [trace.c:630:trace_opendir_cbk] trace:
-(*this=0x8059e40, op_ret=4, op_errno=1, fd=0x8091d50)
-
-2007-10-30 00:08:58 D [trace.c:1602:trace_readdir] trace: callid: 69
-(*this=0x8059e40, size=4096, offset=0 fd=0x8091d50)
-
-2007-10-30 00:08:58 D [trace.c:215:trace_readdir_cbk] trace:
-(*this=0x8059e40, op_ret=0, op_errno=0, count=4)
-
-2007-10-30 00:08:58 D [trace.c:1624:trace_closedir] trace: callid: 71
-(*this=0x8059e40, *fd=0x8091d50)
-
-2007-10-30 00:08:58 D [trace.c:809:trace_closedir_cbk] trace:
-(*this=0x8059e40, op_ret=0, op_errno=1)
-@end example
-@end cartouche
-
-@node Usage Scenarios
-@chapter Usage Scenarios
-
-@section Advanced Striping
-
-This section is based on the Advanced Striping tutorial written by
-Anand Avati on the GlusterFS wiki
-@footnote{http://gluster.org/docs/index.php/Mixing_Striped_and_Regular_Files}.
-
-@subsection Mixed Storage Requirements
-
-There are two ways of scheduling I/O: at the file level (using the
-unify translator) and at the block level (using the stripe
-translator). Striped I/O is good for files that are potentially large
-and require high parallel throughput (for example, a single 400GB file
-being accessed by hundreds or thousands of systems simultaneously and
-randomly). For most cases, file level scheduling works best.
-
-In the real world, it is desirable to mix file level and block level
-scheduling on a single storage volume. Alternatively, users can choose
-to have two separate volumes and hence two mount points, but
-applications may demand a single storage system to host both.
-
-This document explains how to mix file level scheduling with stripe.
-
-@subsection Configuration Brief
-
-This setup demonstrates how users can configure the unify translator with
-an appropriate I/O scheduler for file level scheduling, and stripe only for
-matching patterns. This way, GlusterFS chooses the appropriate I/O profile
-and knows how to efficiently handle both types of data.
-
-A simple technique to achieve this effect is to create a stripe set of
-unify and stripe blocks, where unify is the first sub-volume. Files
-that do not match the stripe policy are passed on to the first (unify)
-sub-volume, and are in turn scheduled across the cluster using its file
-level I/O scheduler.
-
-@image{advanced-stripe,44pc,,,.pdf}
-
-@subsection Preparing GlusterFS Environment
-
-Create the directories /export/for-namespace, /export/for-unify and
-/export/for-stripe on all the storage bricks.
-
-Place the following server and client volume spec files under
-/etc/glusterfs (or the appropriate installed path) and replace the IP
-addresses / access control fields to match your environment.
-
-@cartouche
-@example
- ## file: /etc/glusterfs/glusterfsd.vol
- volume posix-unify
- type storage/posix
- option directory /export/for-unify
- end-volume
-
- volume posix-stripe
- type storage/posix
- option directory /export/for-stripe
- end-volume
-
- volume posix-namespace
- type storage/posix
- option directory /export/for-namespace
- end-volume
-
- volume server
- type protocol/server
- option transport-type tcp
- option auth.addr.posix-unify.allow 192.168.1.*
- option auth.addr.posix-stripe.allow 192.168.1.*
- option auth.addr.posix-namespace.allow 192.168.1.*
- subvolumes posix-unify posix-stripe posix-namespace
- end-volume
-@end example
-@end cartouche
-
-@cartouche
-@example
- ## file: /etc/glusterfs/glusterfs.vol
- volume client-namespace
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.1
- option remote-subvolume posix-namespace
- end-volume
-
- volume client-unify-1
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.1
- option remote-subvolume posix-unify
- end-volume
-
- volume client-unify-2
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.2
- option remote-subvolume posix-unify
- end-volume
-
- volume client-unify-3
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.3
- option remote-subvolume posix-unify
- end-volume
-
- volume client-unify-4
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.4
- option remote-subvolume posix-unify
- end-volume
-
- volume client-stripe-1
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.1
- option remote-subvolume posix-stripe
- end-volume
-
- volume client-stripe-2
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.2
- option remote-subvolume posix-stripe
- end-volume
-
- volume client-stripe-3
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.3
- option remote-subvolume posix-stripe
- end-volume
-
- volume client-stripe-4
- type protocol/client
- option transport-type tcp
- option remote-host 192.168.1.4
- option remote-subvolume posix-stripe
- end-volume
-
- volume unify
- type cluster/unify
- option scheduler rr
-  subvolumes client-unify-1 client-unify-2 client-unify-3 client-unify-4
- end-volume
-
- volume stripe
- type cluster/stripe
- option block-size *.img:2MB # All files ending with .img are striped with 2MB stripe block size.
-  subvolumes unify client-stripe-1 client-stripe-2 client-stripe-3 client-stripe-4
- end-volume
-@end example
-@end cartouche
-
-
-Bring up the Storage
-
-Starting the GlusterFS server: if you have installed from a binary
-package, you can start the service through the init.d startup script. If
-not:
-
-@example
-[root@@server]# glusterfsd
-@end example
-
-Mounting GlusterFS Volumes:
-
-@example
-[root@@client]# glusterfs -s [BRICK-IP-ADDRESS] /mnt/cluster
-@end example
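-
-If you prefer to point the client at the local specification file instead of
-fetching it from a server with @command{-s}, a mount along these lines (using
-the file from the listing above) should also work:
-
-@example
-[root@@client]# glusterfs --spec-file=/etc/glusterfs/glusterfs.vol /mnt/cluster
-@end example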
-
-Improving upon this Setup
-
-The InfiniBand Verbs RDMA transport is much faster than the TCP/IP GigE
-transport.
-
-Use of performance translators such as read-ahead, write-behind,
-io-cache, io-threads, and booster is recommended.
-
-Replace the round-robin (rr) scheduler with ALU to handle more dynamic
-storage environments.
-
-@node Troubleshooting
-@chapter Troubleshooting
-
-This chapter is a general troubleshooting guide to GlusterFS. It lists
-common GlusterFS server and client error messages, debugging hints, and
-concludes with the suggested procedure to report bugs in GlusterFS.
-
-@section GlusterFS error messages
-
-@subsection Server errors
-
-@example
-glusterfsd: FATAL: could not open specfile:
-'/etc/glusterfs/glusterfsd.vol'
-@end example
-
-The GlusterFS server expects the volume specification file to be
-at @command{/etc/glusterfs/glusterfsd.vol}. The example
-specification file will be installed as
-@command{/etc/glusterfs/glusterfsd.vol.sample}. You need to edit
-it and rename it, or provide a different specification file using
-the @command{--spec-file} command line option (See @ref{Server}).
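-
-For instance, assuming the specification file is kept elsewhere, the server
-could be started with something like:
-
-@example
-[root@@server]# glusterfsd --spec-file=/path/to/my-glusterfsd.vol
-@end example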
-
-@vskip 4ex
-
-@example
-gf_log_init: failed to open logfile "/usr/var/log/glusterfs/glusterfsd.log"
- (Permission denied)
-@end example
-
-You don't have permission to create files in the
-@command{/usr/var/log/glusterfs} directory. Make sure you are running
-GlusterFS as root. Alternatively, specify a different path for the log
-file using the @command{--log-file} option (See @ref{Server}).
-
-@subsection Client errors
-
-@example
-fusermount: failed to access mountpoint /mnt:
- Transport endpoint is not connected
-@end example
-
-A previous failed (or hung) mount of GlusterFS is preventing it from being
-mounted again in the same location. The fix is to do:
-
-@example
-# umount /mnt
-@end example
-
-and try mounting again.
-
-@vskip 4ex
-
-@strong{``Transport endpoint is not connected''.}
-
-If you get this error when you try a command such as @command{ls} or @command{cat},
-it means the GlusterFS mount did not succeed. Try running GlusterFS in @command{DEBUG}
-logging level and study the log messages to discover the cause.
-
-@vskip 4ex
-
-@strong{``Connect to server failed'', ``SERVER-ADDRESS: Connection refused''.}
-
-The GlusterFS server is not running or has died. Check your network
-connections and firewall settings. To check if the server is reachable,
-try:
-
-@example
-telnet IP-ADDRESS 24007
-@end example
-
-If the server is accessible, your `telnet' command should connect and
-block. If not, you will see an error message such as @command{telnet: Unable to
-connect to remote host: Connection refused}. 24007 is the default
-GlusterFS port. If you have changed it, then use the corresponding
-port instead.
-
-@vskip 4ex
-
-@example
-gf_log_init: failed to open logfile "/usr/var/log/glusterfs/glusterfs.log"
- (Permission denied)
-@end example
-
-You don't have permission to create files in the
-@command{/usr/var/log/glusterfs} directory. Make sure you are running
-GlusterFS as root. Alternatively, specify a different path for the log
-file using the @command{--log-file} option (See @ref{Client}).
-
-@section FUSE error messages
-@command{modprobe fuse} fails with: ``Unknown symbol in module, or unknown parameter''.
-@cindex Redhat Enterprise Linux
-
-If you are using fuse-2.6.x on Red Hat Enterprise Linux Workstation 4
-and Advanced Server 4 with the 2.6.9-42.ELlargesmp, 2.6.9-42.ELsmp, or
-2.6.9-42.EL kernels and get this error while loading the @acronym{FUSE} kernel
-module, you need to apply the following patch.
-
-For fuse-2.6.2:
-
-@indicateurl{http://ftp.gluster.com/pub/gluster/glusterfs/fuse/fuse-2.6.2-rhel-build.patch}
-
-For fuse-2.6.3:
-
-@indicateurl{http://ftp.gluster.com/pub/gluster/glusterfs/fuse/fuse-2.6.3-rhel-build.patch}
-
-@section AppArmor and GlusterFS
-@cindex AppArmor
-@cindex OpenSuSE
-Under OpenSuSE GNU/Linux, the AppArmor security feature does not
-allow GlusterFS to create temporary files or network socket
-connections even while running as root. You will see error messages
-like `Unable to open log file: Operation not permitted' or `Connection
-refused'. Disabling AppArmor using YaST, or properly configuring
-AppArmor to recognize @command{glusterfsd} or @command{glusterfs}/@command{fusermount},
-should solve the problem.
-
-@section Reporting a bug
-
-If you encounter a bug in GlusterFS, please follow the guidelines
-below when you report it to the mailing list. Be sure to report
-it! User feedback is crucial to the health of the project and we value
-it highly.
-
-@subsection General instructions
-
-When running GlusterFS in a non-production environment, be sure to
-build it with the following command:
-
-@example
- $ make CFLAGS='-g -O0 -DDEBUG'
-@end example
-
-This includes debugging information, which will be helpful in getting
-backtraces (see below), and also disables optimization. Enabling
-optimization can result in incorrect line numbers being reported to
-gdb.
-
-@subsection Volume specification files
-
-Attach all relevant server and client spec files you were using when
-you encountered the bug. Also tell us details of your setup, i.e., how
-many clients and how many servers.
-
-@subsection Log files
-
-Set the loglevel of your client and server programs to @acronym{DEBUG} (by
-passing the -L @acronym{DEBUG} option) and attach the log files with your bug
-report. Obviously, if only the client is failing (for example), you
-only need to send us the client log file.
-
-@subsection Backtrace
-
-If GlusterFS has encountered a segmentation fault or has crashed for
-some other reason, include the backtrace with the bug report. You can
-get the backtrace using the following procedure.
-
-Run the GlusterFS client or server inside gdb.
-
-@example
- $ gdb ./glusterfs
- (gdb) set args -f client.spec -N -l/path/to/log/file -LDEBUG /mnt/point
- (gdb) run
-@end example
-
-Now when the process segfaults, you can get the backtrace by typing:
-
-@example
- (gdb) bt
-@end example
-
-If the GlusterFS process has crashed and dumped a core file (you can
-find this in / if running as a daemon and in the current directory
-otherwise), you can do:
-
-@example
- $ gdb /path/to/glusterfs /path/to/core.<pid>
-@end example
-
-and then get the backtrace.
-
-If the GlusterFS server or client seems to be hung, then you can get
-the backtrace by attaching gdb to the process. First get the @command{PID} of
-the process (using ps), and then do:
-
-@example
- $ gdb ./glusterfs <pid>
-@end example
-
-Press Ctrl-C to interrupt the process and then generate the backtrace.
-
-@subsection Reproducing the bug
-
-If the bug is reproducible, please include the steps necessary to do
-so. If the bug is not reproducible, send us the bug report anyway.
-
-@subsection Other information
-
-If you think it is relevant, also send us the version of @acronym{FUSE} you're
-using, your kernel version, and your platform.
-
-@node GNU Free Documentation Licence
-@appendix GNU Free Documentation Licence
-@include fdl.texi
-
-@node Index
-@unnumbered Index
-@printindex cp
-
-@bye
diff --git a/doc/legacy/xlator.odg b/doc/legacy/xlator.odg
deleted file mode 100644
index 179a65f6e26..00000000000
--- a/doc/legacy/xlator.odg
+++ /dev/null
Binary files differ
diff --git a/doc/legacy/xlator.pdf b/doc/legacy/xlator.pdf
deleted file mode 100644
index a07e14d67d2..00000000000
--- a/doc/legacy/xlator.pdf
+++ /dev/null
Binary files differ
diff --git a/doc/tools/gfind_missing_files.md b/doc/tools/gfind_missing_files.md
deleted file mode 100644
index 47241be5ac6..00000000000
--- a/doc/tools/gfind_missing_files.md
+++ /dev/null
@@ -1,67 +0,0 @@
-Introduction
-========
-The tool gfind_missing_files.sh can be used to find the missing files in a
-GlusterFS geo-replicated slave volume. The tool uses a multi-threaded crawler
-operating on the backend .glusterfs of a brick path, which is passed as one of
-the parameters to the tool. It does a stat on each entry via the slave volume
-mount to check for the presence of the file. The tool uses an aux-gfid mount,
-thereby avoiding path conversions and potentially saving time.
-
-This tool should be run on every node and each brickpath in a geo-replicated
-master volume to find the missing files on the slave volume.
-
-The script gfind_missing_files.sh is a wrapper script that in turn uses the
-gcrawler binary to do the backend crawling. The script detects the gfids of
-the missing files and runs the gfid-to-path conversion script to list out the
-missing files with their full pathnames.
-
-Usage
-=====
-```sh
-$bash gfind_missing_files.sh <BRICK_PATH> <SLAVE_HOST> <SLAVE_VOL> <OUTFILE>
- BRICK_PATH - Full path of the brick
-  SLAVE_HOST - Hostname of a node in the slave volume
-  SLAVE_VOL - Slave volume name
- OUTFILE - Output file which contains gfids of the missing files
-```
-
-The gfid-to-path conversion uses a quicker algorithm for converting gfids to
-paths, and it is possible that in some cases not all missing gfids will be
-converted to their respective paths.
-
-Example output (126733 missing files)
-=====================================
-```sh
-$ionice -c 2 -n 7 ./gfind_missing_files.sh /bricks/m3 acdc slave-vol ~/test_results/m3-4.txt
-Calling crawler...
-Crawl Complete.
-gfids of skipped files are available in the file /root/test_results/m3-4.txt
-Starting gfid to path conversion
-Path names of skipped files are available in the file /root/test_results/m3-4.txt_pathnames
-WARNING: Unable to convert some GFIDs to Paths, GFIDs logged to /root/test_results/m3-4.txt_gfids
-Use bash gfid_to_path.sh <brick-path> /root/test_results/m3-4.txt_gfids to convert those GFIDs to Path
-Total Missing File Count : 126733
-```
-In such cases, an additional step is needed to convert those gfids to paths.
-This can be used as shown below:
-```sh
- $bash gfid_to_path.sh <BRICK_PATH> <GFID_FILE>
- BRICK_PATH - Full path of the brick.
- GFID_FILE - OUTFILE_gfids got from gfind_missing_files.sh
-```
-Things to keep in mind when running the tool
-============================================
-1. Running this tool can result in a crawl of the backend filesystem at each
-   brick, which can be intensive. To ensure there is no impact on ongoing I/O on
-   the volumes, we recommend that this tool be run at a low I/O scheduling class
-   (best-effort) and priority.
-```sh
-$ionice -c 2 -p <pid of gfind_missing_files.sh>
-```
-
-2. We do not recommend interrupting the tool when it is running
-   (e.g. by pressing Ctrl-C). It is better to wait for the tool to finish
-   execution. In case it is interrupted, manually unmount the slave volume.
-```sh
- umount <MOUNT_POINT>
-```
diff --git a/doc/tools/glusterfind.md b/doc/tools/glusterfind.md
deleted file mode 100644
index b70c229ddf4..00000000000
--- a/doc/tools/glusterfind.md
+++ /dev/null
@@ -1,148 +0,0 @@
-# glusterfind - A tool to find Modified files/dirs
-
-A tool which helps to get a full/incremental list of files/dirs from a GlusterFS volume using Changelog/Find. In Gluster volumes, detecting modified files is challenging. A readdir on a directory leads to multiple network calls, since files in a directory are distributed across nodes.
-
-This tool should be run on one of the nodes; it gets the volume info and the list of nodes and brick paths. For each brick, it spawns a process that runs the crawler command on the respective node. The crawler runs on the brick filesystem (xfs, ext4, etc.) and not on the Gluster mount. The crawler generates an output file with the list of files modified after the last run or after the session creation.
-
-## Session Management
-
-Create a glusterfind session to remember the time when the last sync or processing completed. For example, your backup application runs every day and gets incremental results on each run. The tool maintains sessions in `$GLUSTERD_WORKDIR/glusterfind/`; for each session it creates a directory, and under it a sub-directory named after the volume. (The default working directory is /var/lib/glusterd; on some systems this location may differ. To find the working directory location, run `grep working-directory /etc/glusterfs/glusterd.vol`, or `grep working-directory /usr/local/etc/glusterfs/glusterd.vol` for a source install.)
-
-For example, if the session name is "backup" and the volume name is "datavol", then the tool creates `$GLUSTERD_WORKDIR/glusterfind/backup/datavol`. From now on we refer to this directory as `$SESSION_DIR`.
-
- create => pre => post => [delete]
-
-Once the session is created, we can run the tool in two steps, Pre and Post. To collect the list of modified files since the create time or the last run time, we call the pre command. The pre command finds the modified files and generates an output file. The consumer can check the exit code of the pre command and start processing those files. As a post-processing step, run the post command to update the session time to that of the latest run.
-
-For example, a backup utility runs the pre command and gets the list of changed files/directories, syncs those files to the backup target, and informs glusterfind by calling the post command.
-
-At the end of the pre command, the `$SESSION_DIR/status.pre` status file is created. The pre status file stores the time when the current crawl started, and all files/dirs modified up to that time are collected. Once post is called, `$SESSION_DIR/status.pre` is renamed to `$SESSION_DIR/status`. The content of this file is used as the start time for the next crawl.
-
-During pre, we can force the tool to do a full find instead of an incremental find. The tool uses the `find` command on the brick backend to get the list of files/dirs.
-
-When `glusterfind create` is run, the node generates an ssh key (`$GLUSTERD_WORKDIR/glusterfind.secret.pem`) and distributes it to all peers via glusterd. Once the ssh key is distributed in the trusted pool, the tool can run ssh commands and copy files from the other volume nodes.
-
-When `glusterfind pre` is run, it internally runs `gluster volume info` to get the list of nodes and their respective brick paths. For each brick, it calls the respective node agent via ssh to find the modified files/dirs which are local to it. Once each node agent generates its output file, glusterfind collects all the files via scp and merges them into the given output file.
-
-When `glusterfind post` is run, it renames `$SESSION_DIR/status.pre` file to `$SESSION_DIR/status`.
-
-## Changelog Mode and GFID to Path conversion
-
-Incremental find uses changelogs to get the list of modified/created GFIDs, but applications expect file paths instead of GFIDs, and there is no standard/easy way to convert a GFID to a path.
-
-If we set the build-pgfid option on a volume, GlusterFS starts recording each file's parent directory GFID as an xattr on the file on any ENTRY fop.
-
- trusted.pgfid.<GFID>=NUM_LINKS
-
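-For example (a sketch; the volume name `datavol` is an assumption), the option can be enabled with the gluster CLI:
-
-    gluster volume set datavol build-pgfid on
-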
-To convert from a GFID to a path, we can mount the volume with the aux-gfid-mount option, and get path information with a getfattr query.
-
- getfattr -n glusterfs.ancestry.path -e text /mnt/datavol/.gfid/<GFID>
-
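-A sketch of such a mount; the server name `server1`, the volume name `datavol` and the mount point are assumptions:
-
-    mount -t glusterfs -o aux-gfid-mount server1:/datavol /mnt/datavol
-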
-This approach is slow: for a requested file, it gets the parent GFID via the xattr and reads that directory to find the file which has the same inode number as the GFID file. To improve performance, glusterfind uses the build-pgfid option, but instead of using getfattr on the mount it gets the details from the brick backend. glusterfind collects all parent GFIDs at once and starts crawling each directory. Instead of processing one GFID-to-path conversion at a time, it gets the inode numbers of all input GFIDs and filters while reading the parent directory.
-
-The above method is fast compared to `find -samefile`, since it crawls only the required directories to find files with the same inode number as the GFID file. But pgfid information is only available once a lookup or other ENTRY fop is made on a file after build-pgfid is enabled. Files created before build-pgfid was enabled will not get converted from GFID to path with this approach.
-
-The tool collects the list of GFIDs that failed to convert with the above method and does a full crawl to convert them to paths. The find command is used to crawl the entire namespace. Instead of calling find for every GFID, glusterfind uses an efficient way to convert all GFIDs to paths with a single call to the `find` command.
-
-## Usage
-
-### Create the session
-
- glusterfind create SESSION_NAME VOLNAME [--force]
- glusterfind create --help
-
-Where SESSION_NAME is any name without spaces, used to identify the session on subsequent runs. When a node is added to the volume, the tool expects the ssh keys to be copied to the new node(s) as well; run the create command with `--force` to distribute the keys again.
-
-Examples,
-
- glusterfind create --help
- glusterfind create backup datavol
- glusterfind create antivirus_scanner datavol
- glusterfind create backup datavol --force
-
-### Pre Command
-
- glusterfind pre SESSION_NAME VOLUME_NAME OUTFILE
- glusterfind pre --help
-
-The session already stores the volume details; the list of files will be populated in OUTFILE.
-
-To trigger a full find, call the pre command with the `--full` argument. Multiple crawlers are available for incremental find; we can choose the crawl type with the `--crawl` argument.
-
-Examples,
-
- glusterfind pre backup datavol /root/backup.txt
- glusterfind pre backup datavol /root/backup.txt --full
-
- # Changelog based crawler, works only for incremental
- glusterfind pre backup datavol /root/backup.txt --crawler=changelog
-
- # Find based crawler, works for both full and incremental
- glusterfind pre backup datavol /root/backup.txt --crawler=brickfind
-
-The output file contains the list of files/dirs relative to the volume mount. If we need to prefix a path so as to get absolute paths, then:
-
- glusterfind pre backup datavol /root/backup.txt --file-prefix=/mnt/datavol/
-
-### List Command
-
-To get the list of sessions and respective session time,
-
- glusterfind list [--session SESSION_NAME] [--volume VOLUME_NAME]
-
-Examples,
-
- glusterfind list
- glusterfind list --session backup
-
-Example output,
-
- SESSION VOLUME SESSION TIME
- ---------------------------------------------------------------------------
- backup datavol 2015-03-04 17:35:34
-
-### Post Command
-
- glusterfind post SESSION_NAME VOLUME_NAME
-
-Examples,
-
- glusterfind post backup datavol
-
-### Delete Command
-
- glusterfind delete SESSION_NAME VOLUME_NAME
-
-Examples,
-
- glusterfind delete backup datavol
-
-
-## Adding more Crawlers
-
-Adding more crawlers is simple: add an entry in `$GLUSTERD_WORKDIR/glusterfind.conf`. glusterfind can then choose your crawler using the `--crawl` argument.
-
- [crawlers]
- changelog=/usr/libexec/glusterfs/glusterfind/changelog.py
- brickfind=/usr/libexec/glusterfs/glusterfind/brickfind.py
-
-For example, if you have a multithreaded brick crawler, say `parallelbrickcrawl`, add it to the conf file.
-
- [crawlers]
- changelog=/usr/libexec/glusterfs/glusterfind/changelog.py
- brickfind=/usr/libexec/glusterfs/glusterfind/brickfind.py
- parallelbrickcrawl=/root/parallelbrickcrawl
-
-A custom crawler can be any executable script/binary which accepts a session name, volume name, brick path, output file and start time (and an optional debug flag).
-
-For example,
-
- /root/parallelbrickcrawl SESSION_NAME VOLUME BRICK_PATH OUTFILE START_TIME [--debug]
-
-Where `START_TIME` is in Unix epoch format; `START_TIME` will be zero for a full find.
-
-## Known Issues
-
-1. Deleted files will not be listed, since we can't convert a GFID to a path if the file/dir is deleted.
-2. Only the new name will be listed if a file is renamed.
-3. All hardlinks will be listed.
diff --git a/doc/upgrade-guide/upgrade_to_3.5.md b/doc/upgrade-guide/upgrade_to_3.5.md
deleted file mode 100644
index 0f9b712423a..00000000000
--- a/doc/upgrade-guide/upgrade_to_3.5.md
+++ /dev/null
@@ -1,2 +0,0 @@
-Refer below url to upgrade GlusterFS from its earlier versions to GlusterFS 3.5 version.
-[upgrade to 3.5](http://www.gluster.org/community/documentation/index.php/Upgrade_to_3.5)
diff --git a/doc/upgrade-guide/upgrade_to_3.6.md b/doc/upgrade-guide/upgrade_to_3.6.md
deleted file mode 100644
index f2a812491dc..00000000000
--- a/doc/upgrade-guide/upgrade_to_3.6.md
+++ /dev/null
@@ -1,2 +0,0 @@
-Refer below url to upgrade GlusterFS from its earlier versions to GlusterFS 3.6 version.
-[upgrade to 3.6](http://www.gluster.org/community/documentation/index.php/Upgrade_to_3.6)
diff --git a/doc/xlators/meta.md b/doc/xlators/meta.md
deleted file mode 100644
index da0d62ae3d7..00000000000
--- a/doc/xlators/meta.md
+++ /dev/null
@@ -1,206 +0,0 @@
-Meta translator
-===============
-
-Introduction
-------------
-
-The meta xlator provides an interface, similar to the Linux procfs, to GlusterFS
-runtime and configuration information. This document lists some useful information
-about GlusterFS internals that can be accessed via the meta xlator. It is not
-exhaustive at the moment; contributors are welcome to improve it.
-
-Note: the meta xlator is loaded automatically in the client graph, i.e. in the
-mount process's graph.
-
-### GlusterFS native mount version
-
->[root@trantor codebase]# cat $META/version
->{
-> "Package Version": "3.7dev"
->}
-
-### Listing of some files under the `meta` folder
-
->[root@trantor codebase]# mount -t glusterfs trantor:/vol /mnt/fuse
->[root@trantor codebase]# ls $META
->cmdline frames graphs logging mallinfo master measure_latency process_uuid version
-
-### GlusterFS' process identifier
-
->[root@trantor codebase]# cat $META/process_uuid
->trantor-11149-2014/07/25-18:48:50:468259
->
-This identifier appears in connection establishment log messages.
-For example:
-
->[2014-07-25 18:48:49.017927] I [server-handshake.c:585:server_setvolume] 0-vol-server: accepted client from trantor-11087-2014/07/25-18:48:48:779656-vol-client-0-0-0 (version: 3.7dev)
->
-
-### GlusterFS command line
-
->[root@trantor codebase]# cat $META/cmdline
->{
-> "Cmdlinestr": "/usr/local/sbin/glusterfs --volfile-server=trantor --volfile-id=/vol /mnt/fuse"
->}
-
-### GlusterFS volume graph
-
-The following directory structure reveals the way xlators are stacked in a
-graph-like fashion. Each (virtual) file under an xlator directory provides
-runtime information about that xlator. For example, 'name' contains the name of
-the xlator; an illustrative read of one of these files is shown after the listing below.
-
-```
-/mnt/fuse/.meta/graphs/active
-|-- meta-autoload
-| |-- history
-| |-- meminfo
-| |-- name
-| |-- options
-| |-- private
-| |-- profile
-| |-- subvolumes
-| | `-- 0 -> ../../vol
-| |-- type
-| `-- view
-|-- top -> meta-autoload
-|-- vol
-| |-- history
-| |-- meminfo
-| |-- name
-| |-- options
-| | |-- count-fop-hits
-| | `-- latency-measurement
-| |-- private
-| |-- profile
-| |-- subvolumes
-| | `-- 0 -> ../../vol-md-cache
-| |-- type
-| `-- view
-|-- vol-client-0
-| |-- history
-| |-- meminfo
-| |-- name
-| |-- options
-| | |-- client-version
-| | |-- clnt-lk-version
-| | |-- fops-version
-| | |-- password
-| | |-- ping-timeout
-| | |-- process-uuid
-| | |-- remote-host
-| | |-- remote-subvolume
-| | |-- send-gids
-| | |-- transport-type
-| | |-- username
-| | |-- volfile-checksum
-| | `-- volfile-key
-| |-- private
-| |-- profile
-| |-- subvolumes
-| |-- type
-| `-- view
-|-- vol-client-1
-| |-- history
-| |-- meminfo
-| |-- name
-| |-- options
-| | |-- client-version
-| | |-- clnt-lk-version
-| | |-- fops-version
-| | |-- password
-| | |-- ping-timeout
-| | |-- process-uuid
-| | |-- remote-host
-| | |-- remote-subvolume
-| | |-- send-gids
-| | |-- transport-type
-| | |-- username
-| | |-- volfile-checksum
-| | `-- volfile-key
-| |-- private
-| |-- profile
-| |-- subvolumes
-| |-- type
-| `-- view
-|-- vol-dht
-| |-- history
-| |-- meminfo
-| |-- name
-| |-- options
-| |-- private
-| |-- profile
-| |-- subvolumes
-| | |-- 0 -> ../../vol-client-0
-| | `-- 1 -> ../../vol-client-1
-| |-- type
-| `-- view
-|-- volfile
-|-- vol-io-cache
-| |-- history
-| |-- meminfo
-| |-- name
-| |-- options
-| |-- private
-| |-- profile
-| |-- subvolumes
-| | `-- 0 -> ../../vol-read-ahead
-| |-- type
-| `-- view
-|-- vol-md-cache
-| |-- history
-| |-- meminfo
-| |-- name
-| |-- options
-| |-- private
-| |-- profile
-| |-- subvolumes
-| | `-- 0 -> ../../vol-open-behind
-| |-- type
-| `-- view
-|-- vol-open-behind
-| |-- history
-| |-- meminfo
-| |-- name
-| |-- options
-| |-- private
-| |-- profile
-| |-- subvolumes
-| | `-- 0 -> ../../vol-quick-read
-| |-- type
-| `-- view
-|-- vol-quick-read
-| |-- history
-| |-- meminfo
-| |-- name
-| |-- options
-| |-- private
-| |-- profile
-| |-- subvolumes
-| | `-- 0 -> ../../vol-io-cache
-| |-- type
-| `-- view
-|-- vol-read-ahead
-| |-- history
-| |-- meminfo
-| |-- name
-| |-- options
-| |-- private
-| |-- profile
-| |-- subvolumes
-| | `-- 0 -> ../../vol-write-behind
-| |-- type
-| `-- view
-`-- vol-write-behind
- |-- history
- |-- meminfo
- |-- name
- |-- options
- |-- private
- |-- profile
- |-- subvolumes
- | `-- 0 -> ../../vol-dht
- |-- type
- `-- view
-
-```
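-
-For instance (illustrative output; the actual xlator names depend on your volume graph), the type of an xlator can be read directly from its virtual `type` file:
-
->[root@trantor codebase]# cat $META/graphs/active/vol-dht/type
->cluster/distribute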