Diffstat (limited to 'etc')
-rw-r--r--  etc/account-server/1.conf-gluster    19
-rw-r--r--  etc/container-server/1.conf-gluster  21
-rw-r--r--  etc/fs.conf-gluster                  17
-rw-r--r--  etc/object-server/1.conf-gluster     36
-rw-r--r--  etc/proxy-server.conf-gluster        69
-rw-r--r--  etc/swift.conf-gluster               91
6 files changed, 253 insertions, 0 deletions
diff --git a/etc/account-server/1.conf-gluster b/etc/account-server/1.conf-gluster
new file mode 100644
index 0000000..da8f317
--- /dev/null
+++ b/etc/account-server/1.conf-gluster
@@ -0,0 +1,19 @@
+[DEFAULT]
+devices = /mnt/gluster-object
+mount_check = true
+bind_port = 6012
+user = root
+log_facility = LOG_LOCAL2
+
+[pipeline:main]
+pipeline = account-server
+
+[app:account-server]
+use = egg:gluster_swift_ufo#account
+
+[account-replicator]
+vm_test_mode = yes
+
+[account-auditor]
+
+[account-reaper]
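
The [pipeline:main] and [app:account-server] sections above are PasteDeploy wiring: the pipeline lists zero or more filters ending in an app, and the egg: URI names the Python entry point that provides it. A minimal sketch of how such a file is consumed, assuming the conventional /etc/swift/account-server/1.conf install path (the path is an assumption, not something this diff establishes):

    # Build the WSGI application described by [pipeline:main];
    # the config path below is an assumed install location.
    from paste.deploy import loadapp

    app = loadapp("config:/etc/swift/account-server/1.conf")
    # "app" is the gluster_swift_ufo#account entry point, wrapped by
    # any filters listed ahead of it in the pipeline (none here).
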
diff --git a/etc/container-server/1.conf-gluster b/etc/container-server/1.conf-gluster
new file mode 100644
index 0000000..acad621
--- /dev/null
+++ b/etc/container-server/1.conf-gluster
@@ -0,0 +1,21 @@
+[DEFAULT]
+devices = /mnt/gluster-object
+mount_check = true
+bind_port = 6011
+user = root
+log_facility = LOG_LOCAL2
+
+[pipeline:main]
+pipeline = container-server
+
+[app:container-server]
+use = egg:gluster_swift_ufo#container
+
+[container-replicator]
+vm_test_mode = yes
+
+[container-updater]
+
+[container-auditor]
+
+[container-sync]
diff --git a/etc/fs.conf-gluster b/etc/fs.conf-gluster
new file mode 100644
index 0000000..71a9b03
--- /dev/null
+++ b/etc/fs.conf-gluster
@@ -0,0 +1,17 @@
+[DEFAULT]
+# IP address of a GlusterFS volume server member. By default, we assume the
+# local host.
+mount_ip = localhost
+
+# By default it is assumed the Gluster volumes can be accessed using other
+# methods besides UFO (not object only), which disables caching
+# optimizations in order to keep in sync with file system changes.
+object_only = no
+
+# Performance optimization parameter. When turned off, the filesystem will
+# see a reduced number of stat calls, resulting in substantially faster
+# response time for GET and HEAD container requests on containers with large
+# numbers of objects, at the expense of an accurate count of combined bytes
+# used by all objects in the container. For most installations "off" works
+# fine.
+accurate_size_in_listing = off
\ No newline at end of file
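
The accurate_size_in_listing trade-off comes down to one stat() call per object. A hedged sketch of the kind of walk an accurate byte count implies, assuming accounts and containers map to directories under the devices path (the layout shown is illustrative, not gluster-swift's actual code):

    import os

    def container_bytes_used(container_path):
        # Sum st_size over every object in a container directory.
        # This per-object stat() loop is what makes accurate sizes
        # expensive on containers with very large object counts.
        total = 0
        for dirpath, _dirs, files in os.walk(container_path):
            for name in files:
                total += os.stat(os.path.join(dirpath, name)).st_size
        return total

    # Hypothetical account/container layout under the devices path:
    print(container_bytes_used("/mnt/gluster-object/ufo0/mycontainer"))
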
diff --git a/etc/object-server/1.conf-gluster b/etc/object-server/1.conf-gluster
new file mode 100644
index 0000000..0d85546
--- /dev/null
+++ b/etc/object-server/1.conf-gluster
@@ -0,0 +1,36 @@
+[DEFAULT]
+devices = /mnt/gluster-object
+mount_check = true
+bind_port = 6010
+# Setting this value initially to match the number of CPUs is a good
+# starting point for determining the right value.
+workers = 1
+
+[pipeline:main]
+pipeline = object-server
+
+[app:object-server]
+use = egg:gluster_swift_ufo#object
+user = root
+log_facility = LOG_LOCAL2
+# Timeout, in seconds, when connecting to external services such as the
+# container server.
+conn_timeout = 5
+# For high load situations, once connected to a container server, allow for
+# delays communicating with it.
+node_timeout = 60
+# Adjust this value to match the stripe width of the underlying storage array
+# (not the stripe element size). This will provide a reasonable starting point
+# for tuning this value.
+disk_chunk_size = 65536
+# Adjust this value to match whatever is set for the disk_chunk_size
+# initially. This will provide a reasonable starting point for tuning this
+# value.
+network_chunk_size = 65536
+
+[object-replicator]
+vm_test_mode = yes
+
+[object-updater]
+
+[object-auditor]
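
To see why disk_chunk_size and network_chunk_size are tuned together, here is a rough sketch (not the server's actual read path) of an object read issued in disk_chunk_size pieces; the object path is hypothetical:

    DISK_CHUNK_SIZE = 65536  # match disk_chunk_size above

    def iter_object_chunks(path, chunk_size=DISK_CHUNK_SIZE):
        # Yield the object's contents one chunk at a time. Reads sized
        # to the storage array's stripe width line each request up with
        # what the volume can return in one pass, and sending the same
        # sized chunks on the network avoids re-buffering between the
        # disk and network stages.
        with open(path, "rb") as f:
            while True:
                chunk = f.read(chunk_size)
                if not chunk:
                    break
                yield chunk

    for chunk in iter_object_chunks("/mnt/gluster-object/ufo0/c/obj"):
        pass  # e.g. write each chunk to the client socket
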
diff --git a/etc/proxy-server.conf-gluster b/etc/proxy-server.conf-gluster
new file mode 100644
index 0000000..e04efec
--- /dev/null
+++ b/etc/proxy-server.conf-gluster
@@ -0,0 +1,69 @@
+[DEFAULT]
+bind_port = 8080
+user = root
+log_facility = LOG_LOCAL1
+# Consider using 1 worker per CPU
+workers = 1
+
+[pipeline:main]
+pipeline = healthcheck cache tempauth proxy-server
+
+[app:proxy-server]
+use = egg:gluster_swift_ufo#proxy
+log_facility = LOG_LOCAL1
+# The API allows for account creation and deletion, but since Gluster/Swift
+# automounts a Gluster volume for a given account, there is no way to create
+# or delete an account. So leave this off.
+allow_account_management = false
+account_autocreate = true
+# Only need to recheck that the account exists once a day
+recheck_account_existence = 86400
+# May want to consider bumping this up if containers are created and destroyed
+# infrequently.
+recheck_container_existence = 60
+# Timeout clients that don't read or write to the proxy server after 5
+# seconds.
+client_timeout = 5
+# Give more time to connect to the object, container or account servers in
+# cases of high load.
+conn_timeout = 5
+# For high load situations, once connected to an object, container or account
+# server, allow for delays communicating with them.
+node_timeout = 60
+# May want to consider bumping this value up to 1 - 4 MB depending on how
+# much of the traffic consists of multi-megabyte or gigabyte requests;
+# perhaps matching the
+# stripe width (not stripe element size) of your storage volume is a good
+# starting point. See below for sizing information.
+object_chunk_size = 65536
+# If you do decide to increase the object_chunk_size, then consider lowering
+# this value to one. Up to "put_queue_depth" object_chunk_size'd buffers can
+# be queued to the object server for processing. Given one proxy server worker
+# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
+# * 1,024 bytes of memory in the worst case (default values). Be sure the
+# amount of memory available on the system can accommodate increased values
+# for object_chunk_size.
+put_queue_depth = 10
+
+[filter:tempauth]
+use = egg:swift#tempauth
+# Here you need to add users explicitly. See the OpenStack Swift Deployment
+# Guide for more information. The user and user64 directives take the
+# following form:
+# user_<account>_<username> = <key> [group] [group] [...] [storage_url]
+# user64_<account_b64>_<username_b64> = <key> [group] [group] [...] [storage_url]
+# Where you use user64 for accounts and/or usernames that include underscores.
+#
+# NOTE (and WARNING): The account name must match the device name specified
+# when building the account, container, and object rings.
+#
+# E.g.
+# user_ufo0_admin = abc123 .admin
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+
+[filter:cache]
+use = egg:swift#memcache
+# Update this line to contain a comma-separated list of memcache servers
+# shared by all nodes running the proxy-server service.
+memcache_servers = localhost:11211
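
For sizing put_queue_depth, the worst case described above works out to 10 * 65,536 * 1,024 = 671,088,640 bytes, roughly 640 MiB, per proxy worker at the default values.

Once a user such as the user_ufo0_admin example is defined, a client authenticates against tempauth with a GET to /auth/v1.0 and then uses the storage URL and token it returns. A minimal standard-library sketch, using the hypothetical endpoint and the example credentials from the comment above:

    import urllib.request

    # Hypothetical endpoint plus the example account "ufo0",
    # user "admin", and key "abc123" from the tempauth comment.
    req = urllib.request.Request("http://localhost:8080/auth/v1.0")
    req.add_header("X-Auth-User", "ufo0:admin")
    req.add_header("X-Auth-Key", "abc123")
    with urllib.request.urlopen(req) as resp:
        token = resp.headers["X-Auth-Token"]
        storage_url = resp.headers["X-Storage-Url"]

    # Subsequent requests carry the token; e.g. list the account's
    # containers from the storage URL tempauth handed back:
    req = urllib.request.Request(storage_url,
                                 headers={"X-Auth-Token": token})
    with urllib.request.urlopen(req) as resp:
        print(resp.read().decode("utf-8"))
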
diff --git a/etc/swift.conf-gluster b/etc/swift.conf-gluster
new file mode 100644
index 0000000..25c3ca1
--- /dev/null
+++ b/etc/swift.conf-gluster
@@ -0,0 +1,91 @@
+[DEFAULT]
+
+
+[swift-hash]
+# random unique string that can never change (DO NOT LOSE)
+swift_hash_path_suffix = gluster
+
+
+# The swift-constraints section sets the basic constraints on data
+# saved in the swift cluster.
+
+[swift-constraints]
+
+# max_file_size is the largest "normal" object that can be saved in
+# the cluster. This is also the limit on the size of each segment of
+# a "large" object when using the large object manifest support.
+# This value is set in bytes. Setting it lower than 1 MiB will cause
+# some tests to fail. It is STRONGLY recommended to leave this value at
+# the default (5 * 2**30 + 2).
+
+# FIXME: Really? Gluster can handle a 2^64 sized file? And can the fronting
+# web service handle such a size? I think with UFO, we need to stick with the
+# default size from Swift and encourage users to research what size their web
+# services infrastructure can handle.
+
+max_file_size = 18446744073709551616
+
+
+# max_meta_name_length is the max number of bytes in the utf8 encoding
+# of the name portion of a metadata header.
+
+#max_meta_name_length = 128
+
+
+# max_meta_value_length is the max number of bytes in the utf8 encoding
+# of a metadata value
+
+#max_meta_value_length = 256
+
+
+# max_meta_count is the max number of metadata keys that can be stored
+# on a single account, container, or object
+
+#max_meta_count = 90
+
+
+# max_meta_overall_size is the max number of bytes in the utf8 encoding
+# of the metadata (keys + values)
+
+#max_meta_overall_size = 4096
+
+
+# max_object_name_length is the max number of bytes in the utf8 encoding of an
+# object name: Gluster FS can handle much longer file names, but the length
+# between the slashes of the URL is handled below. Remember that most web
+# clients can't handle anything greater than 2048, and those that do are
+# rather clumsy.
+
+max_object_name_length = 2048
+
+# max_object_name_component_length (GlusterFS) is the max number of bytes in
+# the utf8 encoding of an object name component (the part between the
+# slashes); this is a limit imposed by the underlying file system (for XFS it
+# is 255 bytes).
+
+max_object_name_component_length = 255
+
+# container_listing_limit is the default (and max) number of items
+# returned for a container listing request
+
+#container_listing_limit = 10000
+
+
+# account_listing_limit is the default (and max) number of items returned
+# for an account listing request
+
+#account_listing_limit = 10000
+
+
+# max_account_name_length is the max number of bytes in the utf8 encoding of
+# an account name: Gluster FS Filename limit (XFS limit?), must be the same
+# size as max_object_name_component_length above.
+
+max_account_name_length = 255
+
+
+# max_container_name_length is the max number of bytes in the utf8 encoding
+# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
+# size as max_object_name_component_length above.
+
+max_container_name_length = 255
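
Taken together, max_object_name_length and max_object_name_component_length say a full object name may be up to 2048 bytes of UTF-8 while each slash-separated component must fit the file system's 255-byte filename limit. A small illustrative validator (an assumption-laden sketch, not Swift's actual constraint-checking code):

    MAX_OBJECT_NAME_LENGTH = 2048
    MAX_COMPONENT_LENGTH = 255  # per-component filename limit (e.g. XFS)

    def object_name_ok(name: str) -> bool:
        # The whole name is bounded for URL-handling reasons, while each
        # component between slashes becomes a directory entry on the
        # Gluster volume and so must fit the file system's limit.
        if len(name.encode("utf-8")) > MAX_OBJECT_NAME_LENGTH:
            return False
        return all(len(part.encode("utf-8")) <= MAX_COMPONENT_LENGTH
                   for part in name.split("/"))

    assert object_name_ok("photos/2013/vacation.jpg")
    assert not object_name_ok("a" * 256)  # one component over the limit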