Diffstat (limited to 'test/functional_auth/keystone')
-rw-r--r--  test/functional_auth/keystone/conf/account-server.conf   | 32
-rw-r--r--  test/functional_auth/keystone/conf/container-server.conf | 35
-rw-r--r--  test/functional_auth/keystone/conf/fs.conf               | 19
-rw-r--r--  test/functional_auth/keystone/conf/object-server.conf    | 48
-rw-r--r--  test/functional_auth/keystone/conf/proxy-server.conf     | 27
-rw-r--r--  test/functional_auth/keystone/conf/swift.conf            | 85
6 files changed, 14 insertions(+), 232 deletions(-)
diff --git a/test/functional_auth/keystone/conf/account-server.conf b/test/functional_auth/keystone/conf/account-server.conf
deleted file mode 100644
index 4996367..0000000
--- a/test/functional_auth/keystone/conf/account-server.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-[DEFAULT]
-devices = /mnt/gluster-object
-#
-# Once you are confident that your startup processes will always have your
-# gluster volumes properly mounted *before* the account-server workers start,
-# you can *consider* setting this value to "false" to reduce the per-request
-# overhead it can incur.
-#
-# *** Keep false for Functional Tests ***
-mount_check = false
-bind_port = 6012
-#
-# Override swift's default behaviour for fallocate.
-disable_fallocate = true
-#
-# One or two workers should be sufficient for almost any installation of
-# Gluster.
-workers = 1
-
-[pipeline:main]
-pipeline = account-server
-
-[app:account-server]
-use = egg:gluster_swift#account
-user = root
-log_facility = LOG_LOCAL2
-log_level = WARN
-#
-# After ensuring things are running in a stable manner, you can turn off
-# normal request logging for the account server to unclutter the log
-# files. Warnings and errors will still be logged.
-log_requests = off
diff --git a/test/functional_auth/keystone/conf/container-server.conf b/test/functional_auth/keystone/conf/container-server.conf
deleted file mode 100644
index 122d97e..0000000
--- a/test/functional_auth/keystone/conf/container-server.conf
+++ /dev/null
@@ -1,35 +0,0 @@
-[DEFAULT]
-devices = /mnt/gluster-object
-#
-# Once you are confident that your startup processes will always have your
-# gluster volumes properly mounted *before* the container-server workers
-# start, you can *consider* setting this value to "false" to reduce the
-# per-request overhead it can incur.
-#
-# *** Keep false for Functional Tests ***
-mount_check = false
-bind_port = 6011
-#
-# Override swift's default behaviour for fallocate.
-disable_fallocate = true
-#
-# One or two workers should be sufficient for almost any installation of
-# Gluster.
-workers = 1
-
-[pipeline:main]
-pipeline = container-server
-
-[app:container-server]
-use = egg:gluster_swift#container
-user = root
-log_facility = LOG_LOCAL2
-log_level = WARN
-#
-# After ensuring things are running in a stable manner, you can turn off
-# normal request logging for the container server to unclutter the log
-# files. Warnings and errors will still be logged.
-log_requests = off
-
-# Enable object versioning for the functional tests
-allow_versions = on
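
The allow_versions option removed above is what let the functional tests exercise object versioning. Versioning in Swift is enabled per container by pointing it at an archive container via the X-Versions-Location header; a minimal sketch of doing that against this test deployment (the storage URL and token are placeholder values):

    import requests

    storage_url = 'http://localhost:8080/v1/AUTH_test'  # placeholder endpoint
    headers = {'X-Auth-Token': 'placeholder-token'}

    # Create the archive container, then mark 'docs' as versioned. With
    # allow_versions = on, an overwritten object in 'docs' is copied into
    # 'docs_versions' instead of being lost.
    requests.put('%s/docs_versions' % storage_url, headers=headers)
    requests.put('%s/docs' % storage_url,
                 headers=dict(headers, **{'X-Versions-Location': 'docs_versions'}))
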
diff --git a/test/functional_auth/keystone/conf/fs.conf b/test/functional_auth/keystone/conf/fs.conf
deleted file mode 100644
index b06a854..0000000
--- a/test/functional_auth/keystone/conf/fs.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-[DEFAULT]
-#
-# IP address of a node in the GlusterFS server cluster hosting the
-# volumes to be served via Swift API.
-mount_ip = localhost
-
-# Performance optimization parameter. When turned off, the filesystem will
-# see a reduced number of stat calls, resulting in substantially faster
-# response time for GET and HEAD container requests on containers with large
-# numbers of objects, at the expense of an accurate count of combined bytes
-# used by all objects in the container. For most installations "off" works
-# fine.
-#
-# *** Keep on for Functional Tests ***
-accurate_size_in_listing = on
-
-# *** Keep on for Functional Tests ***
-container_update_object_count = on
-account_update_container_count = on
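
The accurate_size_in_listing trade-off described above comes down to stat calls: reporting accurate byte totals for a container listing means statting every object under it. A rough sketch of that cost (illustrative only, not the actual gluster-swift code; the container path is a placeholder):

    import os

    def container_bytes_used(container_path):
        # An accurate byte total requires one stat per object below the
        # container directory -- this is the per-listing cost the option
        # lets you trade away.
        total = 0
        for dirpath, _dirs, files in os.walk(container_path):
            for name in files:
                total += os.stat(os.path.join(dirpath, name)).st_size
        return total

    print(container_bytes_used('/mnt/gluster-object/test/container'))
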
diff --git a/test/functional_auth/keystone/conf/object-server.conf b/test/functional_auth/keystone/conf/object-server.conf
deleted file mode 100644
index 3cb9ead..0000000
--- a/test/functional_auth/keystone/conf/object-server.conf
+++ /dev/null
@@ -1,48 +0,0 @@
-[DEFAULT]
-devices = /mnt/gluster-object
-#
-# Once you are confident that your startup processes will always have your
-# gluster volumes properly mounted *before* the object-server workers start,
-# you can *consider* setting this value to "false" to reduce the per-request
-# overhead it can incur.
-#
-# *** Keep false for Functional Tests ***
-mount_check = false
-bind_port = 6010
-#
-# Maximum number of clients one worker can process simultaneously (it will
-# actually accept N + 1). Setting this to one (1) will only handle one request
-# at a time, without accepting another request concurrently. By increasing the
-# number of workers to a much higher value, one can prevent slow file system
-# operations for one request from starving other requests.
-max_clients = 1024
-#
-# If not doing the above, setting this value initially to match the number of
-# CPUs is a good starting point for determining the right value.
-workers = 1
-# Override swift's default behaviour for fallocate.
-disable_fallocate = true
-
-[pipeline:main]
-pipeline = object-server
-
-[app:object-server]
-use = egg:gluster_swift#object
-user = root
-log_facility = LOG_LOCAL2
-log_level = WARN
-#
-# For performance, after ensuring things are running in a stable manner, you
-# can turn off normal request logging for the object server to reduce the
-# per-request overhead and unclutter the log files. Warnings and errors will
-# still be logged.
-log_requests = off
-#
-# Adjust this value to match the stripe width of the underlying storage array
-# (not the stripe element size). This will provide a reasonable starting point
-# for tuning this value.
-disk_chunk_size = 65536
-#
-# Adjust this value to match whatever is set for disk_chunk_size initially.
-# This will provide a reasonable starting point for tuning this value.
-network_chunk_size = 65536
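
disk_chunk_size and network_chunk_size set how much data is moved per read from disk and per write to the client, and the comments above suggest keeping them equal. A sketch of the streaming pattern these values govern (illustrative, not the object server's actual code; the object path is a placeholder):

    def iter_object_chunks(fp, chunk_size=65536):
        # Stream the object in chunk_size pieces instead of reading it
        # whole, so one large object cannot monopolize memory.
        while True:
            chunk = fp.read(chunk_size)
            if not chunk:
                break
            yield chunk

    with open('/mnt/gluster-object/test/c/obj', 'rb') as fp:
        for chunk in iter_object_chunks(fp):
            pass  # e.g. hand each chunk to the client socket
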
diff --git a/test/functional_auth/keystone/conf/proxy-server.conf b/test/functional_auth/keystone/conf/proxy-server.conf
index 084e6a5..72a84da 100644
--- a/test/functional_auth/keystone/conf/proxy-server.conf
+++ b/test/functional_auth/keystone/conf/proxy-server.conf
@@ -6,7 +6,7 @@ workers = 1
[pipeline:main]
#pipeline = catch_errors healthcheck proxy-logging cache tempauth proxy-logging proxy-server
-pipeline = catch_errors healthcheck proxy-logging cache authtoken keystoneauth proxy-logging proxy-server
+pipeline = catch_errors healthcheck proxy-logging cache tempurl authtoken keystoneauth proxy-logging proxy-server
[app:proxy-server]
use = egg:gluster_swift#proxy
@@ -49,6 +49,10 @@ object_chunk_size = 65536
# amount of memory available on the system can accommodate increased values
# for object_chunk_size.
put_queue_depth = 10
+# The following parameters are used by the object-expirer and need to be
+# the same across all conf files!
+auto_create_account_prefix = gs
+expiring_objects_account_name = expiring
[filter:catch_errors]
use = egg:swift#catch_errors
@@ -59,13 +63,14 @@ use = egg:swift#proxy_logging
[filter:healthcheck]
use = egg:swift#healthcheck
-[filter:tempauth]
-use = egg:swift#tempauth
-user_admin_admin = admin .admin .reseller_admin
-user_d4dde08c621a4f0fb4cde0ac6a62aa0c_tester = testing .admin
-user_test_tester = testing .admin
-user_test2_tester2 = testing2 .admin
-user_test_tester3 = testing3
+[filter:cache]
+use = egg:swift#memcache
+# Update this line to contain a comma separated list of memcache servers
+# shared by all nodes running the proxy-server service.
+memcache_servers = localhost:11211
+
+[filter:tempurl]
+use = egg:swift#tempurl
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
@@ -87,8 +92,4 @@ operator_roles = admin
is_admin = true
cache = swift.cache
-[filter:cache]
-use = egg:swift#memcache
-# Update this line to contain a comma separated list of memcache servers
-# shared by all nodes running the proxy-server service.
-memcache_servers = localhost:11211
+
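
Two things change in this file: swift#tempurl joins the pipeline so the functional tests can exercise temporary URLs, and auto_create_account_prefix / expiring_objects_account_name are pinned so the object-expirer and the other servers agree on the expiring-objects account. For reference, a temp URL is signed client-side with an HMAC-SHA1 over the method, expiry, and path (the standard Swift tempurl scheme; the key, host, and object path below are placeholders):

    import hmac
    import time
    from hashlib import sha1

    key = b'mysecretkey'                     # the account's temp-URL key
    path = '/v1/AUTH_test/container/object'  # placeholder object path
    expires = int(time.time()) + 300         # link valid for five minutes

    # The tempurl middleware verifies an HMAC-SHA1 over "METHOD\nEXPIRES\nPATH".
    message = 'GET\n%d\n%s' % (expires, path)
    sig = hmac.new(key, message.encode('utf-8'), sha1).hexdigest()

    print('http://localhost:8080%s?temp_url_sig=%s&temp_url_expires=%d'
          % (path, sig, expires))
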
diff --git a/test/functional_auth/keystone/conf/swift.conf b/test/functional_auth/keystone/conf/swift.conf
deleted file mode 100644
index ce9a4d0..0000000
--- a/test/functional_auth/keystone/conf/swift.conf
+++ /dev/null
@@ -1,85 +0,0 @@
-[DEFAULT]
-
-
-[swift-hash]
-# random unique string that can never change (DO NOT LOSE)
-swift_hash_path_suffix = gluster
-
-
-# The swift-constraints section sets the basic constraints on data
-# saved in the swift cluster.
-
-[swift-constraints]
-
-# max_file_size is the largest "normal" object that can be saved in
-# the cluster. This is also the limit on the size of each segment of
-# a "large" object when using the large object manifest support.
-# This value is set in bytes. Setting it lower than 1 MiB will cause
-# some tests to fail.
-# Default is 1 TiB (2**30 * 1024 = 2**40 bytes).
-max_file_size = 1099511627776
-
-
-# max_meta_name_length is the max number of bytes in the utf8 encoding
-# of the name portion of a metadata header.
-
-#max_meta_name_length = 128
-
-
-# max_meta_value_length is the max number of bytes in the utf8 encoding
-# of a metadata value
-
-#max_meta_value_length = 256
-
-
-# max_meta_count is the max number of metadata keys that can be stored
-# on a single account, container, or object
-
-#max_meta_count = 90
-
-
-# max_meta_overall_size is the max number of bytes in the utf8 encoding
-# of the metadata (keys + values)
-
-#max_meta_overall_size = 4096
-
-
-# max_object_name_length is the max number of bytes in the utf8 encoding of an
-# object name: Gluster FS can handle much longer file names, but the length
-# between the slashes of the URL is handled below. Remember that most web
-# clients can't handle anything greater than 2048, and those that do are
-# rather clumsy.
-
-max_object_name_length = 2048
-
-# max_object_name_component_length (GlusterFS) is the max number of bytes in
-# the utf8 encoding of an object name component (the part between the
-# slashes); this is a limit imposed by the underlying file system (for XFS it
-# is 255 bytes).
-
-max_object_name_component_length = 255
-
-# container_listing_limit is the default (and max) number of items
-# returned for a container listing request
-
-#container_listing_limit = 10000
-
-
-# account_listing_limit is the default (and max) number of items returned
-# for an account listing request
-
-#account_listing_limit = 10000
-
-
-# max_account_name_length is the max number of bytes in the utf8 encoding of
-# an account name. This is a Gluster FS filename limit (an XFS limit?), so it
-# must be the same as max_object_name_component_length above.
-
-max_account_name_length = 255
-
-
-# max_container_name_length is the max number of bytes in the utf8 encoding
-# of a container name. This is a Gluster FS filename limit (an XFS limit?),
-# so it must be the same as max_object_name_component_length above.
-
-max_container_name_length = 255
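
These limits interact in a way that is easy to misread: a full object name may be up to max_object_name_length bytes, but every slash-separated component must also fit the 255-byte filesystem limit, because GlusterFS stores each component as a directory or file name. A small sketch of that check (illustrative, not gluster-swift's actual validation):

    MAX_OBJECT_NAME_LENGTH = 2048  # whole object name, per swift.conf above
    MAX_COMPONENT_LENGTH = 255     # per path component (XFS filename limit)

    def object_name_ok(name):
        if len(name.encode('utf-8')) > MAX_OBJECT_NAME_LENGTH:
            return False
        # Each component lands on the filesystem as one directory or file.
        return all(len(part.encode('utf-8')) <= MAX_COMPONENT_LENGTH
                   for part in name.split('/'))

    print(object_name_ok('photos/2013/beach.jpg'))  # True
    print(object_name_ok('x' * 300))                # False: component too long
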