Diffstat:
-rwxr-xr-x  extras/hook-scripts/S40ufo-stop.py | 24
-rw-r--r--  ufo/README | 16
-rwxr-xr-x  ufo/bin/gluster-swift-gen-builders | 44
-rw-r--r--  ufo/etc/account-server/1.conf-gluster | 19
-rw-r--r--  ufo/etc/container-server/1.conf-gluster | 21
-rw-r--r--  ufo/etc/fs.conf-gluster | 17
-rw-r--r--  ufo/etc/object-server/1.conf-gluster | 36
-rw-r--r--  ufo/etc/proxy-server.conf-gluster | 69
-rw-r--r--  ufo/etc/swift.conf-gluster | 91
-rw-r--r--  ufo/gluster-swift-ufo.spec | 79
-rw-r--r--  ufo/gluster/__init__.py | 0
-rw-r--r--  ufo/gluster/swift/__init__.py | 18
-rw-r--r--  ufo/gluster/swift/account/__init__.py | 0
-rw-r--r--  ufo/gluster/swift/account/server.py | 45
-rw-r--r--  ufo/gluster/swift/common/DiskDir.py | 496
-rw-r--r--  ufo/gluster/swift/common/DiskFile.py | 338
-rw-r--r--  ufo/gluster/swift/common/Glusterfs.py | 144
-rw-r--r--  ufo/gluster/swift/common/__init__.py | 0
-rw-r--r--  ufo/gluster/swift/common/constraints.py | 94
-rw-r--r--  ufo/gluster/swift/common/exceptions.py | 27
-rw-r--r--  ufo/gluster/swift/common/fs_utils.py | 179
-rw-r--r--  ufo/gluster/swift/common/middleware/__init__.py | 0
-rw-r--r--  ufo/gluster/swift/common/middleware/gluster.py | 40
-rw-r--r--  ufo/gluster/swift/common/ring.py | 111
-rw-r--r--  ufo/gluster/swift/common/utils.py | 496
-rw-r--r--  ufo/gluster/swift/container/__init__.py | 0
-rw-r--r--  ufo/gluster/swift/container/server.py | 46
-rw-r--r--  ufo/gluster/swift/obj/__init__.py | 0
-rw-r--r--  ufo/gluster/swift/obj/server.py | 34
-rw-r--r--  ufo/gluster/swift/proxy/__init__.py | 0
-rw-r--r--  ufo/gluster/swift/proxy/server.py | 27
-rw-r--r--  ufo/setup.py | 57
-rw-r--r--  ufo/test/__init__.py | 49
-rw-r--r--  ufo/test/unit/__init__.py | 95
-rw-r--r--  ufo/test/unit/common/__init__.py | 0
-rw-r--r--  ufo/test/unit/common/data/README.rings | 3
-rw-r--r--  ufo/test/unit/common/data/account.builder | bin 0 -> 537 bytes
-rw-r--r--  ufo/test/unit/common/data/account.ring.gz | bin 0 -> 183 bytes
-rw-r--r--  ufo/test/unit/common/data/account_tree.tar.bz2 | bin 0 -> 228 bytes
-rw-r--r--  ufo/test/unit/common/data/backups/1365124498.account.builder | bin 0 -> 537 bytes
-rw-r--r--  ufo/test/unit/common/data/backups/1365124498.container.builder | bin 0 -> 537 bytes
-rw-r--r--  ufo/test/unit/common/data/backups/1365124498.object.builder | bin 0 -> 228 bytes
-rw-r--r--  ufo/test/unit/common/data/backups/1365124499.object.builder | bin 0 -> 537 bytes
-rw-r--r--  ufo/test/unit/common/data/container.builder | bin 0 -> 537 bytes
-rw-r--r--  ufo/test/unit/common/data/container.ring.gz | bin 0 -> 185 bytes
-rw-r--r--  ufo/test/unit/common/data/container_tree.tar.bz2 | bin 0 -> 282 bytes
-rw-r--r--  ufo/test/unit/common/data/object.builder | bin 0 -> 537 bytes
-rw-r--r--  ufo/test/unit/common/data/object.ring.gz | bin 0 -> 182 bytes
-rw-r--r--  ufo/test/unit/common/test_Glusterfs.py | 95
-rw-r--r--  ufo/test/unit/common/test_diskfile.py | 932
-rw-r--r--  ufo/test/unit/common/test_fs_utils.py | 277
-rw-r--r--  ufo/test/unit/common/test_ring.py | 55
-rw-r--r--  ufo/test/unit/common/test_utils.py | 1020
-rw-r--r--  ufo/tools/test-requires | 6
-rw-r--r--  ufo/tox.ini | 25
-rwxr-xr-x  ufo/unittests.sh | 7

56 files changed, 5132 insertions(+), 0 deletions(-)
diff --git a/extras/hook-scripts/S40ufo-stop.py b/extras/hook-scripts/S40ufo-stop.py
new file mode 100755
index 0000000..107f196
--- /dev/null
+++ b/extras/hook-scripts/S40ufo-stop.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+import sys
+from optparse import OptionParser
+
+if __name__ == '__main__':
+    # Check that OpenStack Swift and the Gluster UFO plugin are installed
+ try:
+ from gluster.swift.common.Glusterfs import get_mnt_point, unmount
+ except ImportError:
+        sys.exit("OpenStack Swift does not appear to be installed properly")
+
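+    # glusterd invokes stop hook scripts as:
+    #   <script> --volname=<volume> --last=<yes|no>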
+ op = OptionParser(usage="%prog [options...]")
+ op.add_option('--volname', dest='vol', type=str)
+ op.add_option('--last', dest='last', type=str)
+ (opts, args) = op.parse_args()
+
+ mnt_point = get_mnt_point(opts.vol)
+ if mnt_point:
+ unmount(mnt_point)
+ else:
+        sys.exit("get_mnt_point returned None for the mount point")
diff --git a/ufo/README b/ufo/README
new file mode 100644
index 0000000..9efd918
--- /dev/null
+++ b/ufo/README
@@ -0,0 +1,16 @@
+Gluster Unified File and Object Storage (UFO) allows files and directories
+created via a gluster-native or NFS mount to be accessed as containers and
+objects. It is a plugin for the OpenStack Swift project.
+
+Install
+ * TBD
+
+Once this is done, you can access the GlusterFS volumes as Swift accounts.
+Add each volume name, along with a user name and its corresponding password,
+to /etc/swift/proxy-server.conf (follow the syntax used in the sample conf
+file).
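+
+For example, a volume named "myvol" with an admin user could be listed in
+the tempauth section as:
+
+  user_myvol_admin = secretpassword .admin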
+
+Command to start the servers (TBD)
+ swift-init main start
+
+Command to stop the servers (TBD)
+ swift-init main stop
diff --git a/ufo/bin/gluster-swift-gen-builders b/ufo/bin/gluster-swift-gen-builders
new file mode 100755
index 0000000..bef5d14
--- /dev/null
+++ b/ufo/bin/gluster-swift-gen-builders
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# Note that these port numbers must match the configured values for the
+# various servers in their configuration files.
+declare -A port=(["account.builder"]=6012 ["container.builder"]=6011 \
+ ["object.builder"]=6010)
+
+builder_files="account.builder container.builder object.builder"
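+
+# Example usage (for hypothetical volumes "vol1" and "vol2"):
+#   gluster-swift-gen-builders vol1 vol2
+# This writes the three builder files and their ring files to the current
+# directory, adding one device per volume, each in its own zone.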
+
+function create {
+ swift-ring-builder $1 create 1 1 1 >> /tmp/out
+}
+
+function add {
+ swift-ring-builder $1 add z$2-127.0.0.1:$3/$4_ 100.0
+}
+
+function rebalance {
+ swift-ring-builder $1 rebalance
+}
+
+function build {
+ swift-ring-builder $1
+}
+
+if [ -z "$1" ]; then
+    echo "Please specify at least one gluster volume name to use."
+    exit 1
+fi
+
+for builder_file in $builder_files
+do
+ create $builder_file
+
+ zone=1
+    for volname in "$@"
+    do
+        add $builder_file $zone ${port[$builder_file]} $volname
+        zone=$((zone + 1))
+    done
+
+ rebalance $builder_file
+ build $builder_file
+done
diff --git a/ufo/etc/account-server/1.conf-gluster b/ufo/etc/account-server/1.conf-gluster
new file mode 100644
index 0000000..da8f317
--- /dev/null
+++ b/ufo/etc/account-server/1.conf-gluster
@@ -0,0 +1,19 @@
+[DEFAULT]
+devices = /mnt/gluster-object
+mount_check = true
+bind_port = 6012
+user = root
+log_facility = LOG_LOCAL2
+
+[pipeline:main]
+pipeline = account-server
+
+[app:account-server]
+use = egg:gluster_swift_ufo#account
+
+[account-replicator]
+vm_test_mode = yes
+
+[account-auditor]
+
+[account-reaper]
diff --git a/ufo/etc/container-server/1.conf-gluster b/ufo/etc/container-server/1.conf-gluster
new file mode 100644
index 0000000..acad621
--- /dev/null
+++ b/ufo/etc/container-server/1.conf-gluster
@@ -0,0 +1,21 @@
+[DEFAULT]
+devices = /mnt/gluster-object
+mount_check = true
+bind_port = 6011
+user = root
+log_facility = LOG_LOCAL2
+
+[pipeline:main]
+pipeline = container-server
+
+[app:container-server]
+use = egg:gluster_swift_ufo#container
+
+[container-replicator]
+vm_test_mode = yes
+
+[container-updater]
+
+[container-auditor]
+
+[container-sync]
diff --git a/ufo/etc/fs.conf-gluster b/ufo/etc/fs.conf-gluster
new file mode 100644
index 0000000..71a9b03
--- /dev/null
+++ b/ufo/etc/fs.conf-gluster
@@ -0,0 +1,17 @@
+[DEFAULT]
+# IP address of a GlusterFS volume server member. By default, we assume the
+# local host.
+mount_ip = localhost
+
+# By default it is assumed the Gluster volumes can be accessed using methods
+# other than UFO (not object only), which disables certain caching
+# optimizations in order to keep in sync with file system changes.
+object_only = no
+
+# Performance optimization parameter. When turned off, the filesystem will
+# see a reduced number of stat calls, resulting in substantially faster
+# response time for GET and HEAD container requests on containers with large
+# numbers of objects, at the expense of an accurate count of combined bytes
+# used by all objects in the container. For most installations "off" works
+# fine.
+accurate_size_in_listing = off
\ No newline at end of file
diff --git a/ufo/etc/object-server/1.conf-gluster b/ufo/etc/object-server/1.conf-gluster
new file mode 100644
index 0000000..0d85546
--- /dev/null
+++ b/ufo/etc/object-server/1.conf-gluster
@@ -0,0 +1,36 @@
+[DEFAULT]
+devices = /mnt/gluster-object
+mount_check = true
+bind_port = 6010
+# Setting this value initially to match the number of CPUs is a good starting
+# point for determining the right value.
+workers = 1
+
+[pipeline:main]
+pipeline = object-server
+
+[app:object-server]
+use = egg:gluster_swift_ufo#object
+user = root
+log_facility = LOG_LOCAL2
+# Give up and time out after 5 seconds when attempting to connect to another
+# server.
+conn_timeout = 5
+# For high load situations, once connected to a container server, allow for
+# delays communicating with it.
+node_timeout = 60
+# Adjust this value to match the stripe width of the underlying storage array
+# (not the stripe element size). This will provide a reasonable starting point
+# for tuning this value.
+disk_chunk_size = 65536
+# Adjust this value to match whatever is set for the disk_chunk_size
+# initially. This will provide a reasonable starting point for tuning this
+# value.
+network_chunk_size = 65536
+
+[object-replicator]
+vm_test_mode = yes
+
+[object-updater]
+
+[object-auditor]
diff --git a/ufo/etc/proxy-server.conf-gluster b/ufo/etc/proxy-server.conf-gluster
new file mode 100644
index 0000000..e04efec
--- /dev/null
+++ b/ufo/etc/proxy-server.conf-gluster
@@ -0,0 +1,69 @@
+[DEFAULT]
+bind_port = 8080
+user = root
+log_facility = LOG_LOCAL1
+# Consider using 1 worker per CPU
+workers = 1
+
+[pipeline:main]
+pipeline = healthcheck cache tempauth proxy-server
+
+[app:proxy-server]
+use = egg:gluster_swift_ufo#proxy
+log_facility = LOG_LOCAL1
+# The API allows for account creation and deletion, but since Gluster/Swift
+# automounts a Gluster volume for a given account, there is no way to create
+# or delete an account. So leave this off.
+allow_account_management = false
+account_autocreate = true
+# Only need to recheck the account exists once a day
+recheck_account_existence = 86400
+# May want to consider bumping this up if containers are created and destroyed
+# infrequently.
+recheck_container_existence = 60
+# Timeout clients that don't read or write to the proxy server after 5
+# seconds.
+client_timeout = 5
+# Give more time to connect to the object, container or account servers in
+# cases of high load.
+conn_timeout = 5
+# For high load situations, once connected to an object, container or account
+# server, allow for delays communicating with them.
+node_timeout = 60
+# May want to consider bumping this value up to 1 - 4 MB depending on how much
+# of the traffic consists of multi-megabyte or gigabyte requests; perhaps
+# matching the stripe width (not stripe element size) of your storage volume
+# is a good starting point. See below for sizing information.
+object_chunk_size = 65536
+# If you do decide to increase the object_chunk_size, then consider lowering
+# this value to one. Up to "put_queue_depth" object_chunk_size'd buffers can
+# be queued to the object server for processing. Given that one proxy server
+# worker can handle up to 1,024 connections by default, it will consume
+# 10 * 65,536 * 1,024 bytes of memory in the worst case (default values). Be
+# sure the amount of memory available on the system can accommodate increased
+# values for object_chunk_size.
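+# Worked example with the defaults: 10 * 65,536 * 1,024 = 671,088,640 bytes,
+# or roughly 640 MiB, per proxy server worker.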
+put_queue_depth = 10
+
+[filter:tempauth]
+use = egg:swift#tempauth
+# Here you need to add users explicitly. See the OpenStack Swift Deployment
+# Guide for more information. The user and user64 directives take the
+# following form:
+# user_<account>_<username> = <key> [group] [group] [...] [storage_url]
+# user64_<account_b64>_<username_b64> = <key> [group] [group] [...] [storage_url]
+# Where you use user64 for accounts and/or usernames that include underscores.
+#
+# NOTE (and WARNING): The account name must match the device name specified
+# when generating the account, container, and object ring builder files.
+#
+# E.g.
+# user_ufo0_admin = abc123 .admin
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+
+[filter:cache]
+use = egg:swift#memcache
+# Update this line to contain a comma separated list of memcache servers
+# shared by all nodes running the proxy-server service.
+memcache_servers = localhost:11211
diff --git a/ufo/etc/swift.conf-gluster b/ufo/etc/swift.conf-gluster
new file mode 100644
index 0000000..25c3ca1
--- /dev/null
+++ b/ufo/etc/swift.conf-gluster
@@ -0,0 +1,91 @@
+[DEFAULT]
+
+
+[swift-hash]
+# random unique string that can never change (DO NOT LOSE)
+swift_hash_path_suffix = gluster
+
+
+# The swift-constraints section sets the basic constraints on data
+# saved in the swift cluster.
+
+[swift-constraints]
+
+# max_file_size is the largest "normal" object that can be saved in
+# the cluster. This is also the limit on the size of each segment of
+# a "large" object when using the large object manifest support.
+# This value is set in bytes. Setting it to lower than 1MiB will cause
+# some tests to fail. It is STRONGLY recommended to leave this value at
+# the default (5 * 2**30 + 2).
+
+# FIXME: Really? Can Gluster handle a file sized at 2^64 bytes? And can the
+# fronting web service handle such a size? I think with UFO, we need to keep
+# the default size from Swift and encourage users to research what size their
+# web services infrastructure can handle.
+
+max_file_size = 18446744073709551616
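+# (For reference, 18446744073709551616 is 2**64; i.e. the limit is 16 EiB.)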
+
+
+# max_meta_name_length is the max number of bytes in the utf8 encoding
+# of the name portion of a metadata header.
+
+#max_meta_name_length = 128
+
+
+# max_meta_value_length is the max number of bytes in the utf8 encoding
+# of a metadata value
+
+#max_meta_value_length = 256
+
+
+# max_meta_count is the max number of metadata keys that can be stored
+# on a single account, container, or object
+
+#max_meta_count = 90
+
+
+# max_meta_overall_size is the max number of bytes in the utf8 encoding
+# of the metadata (keys + values)
+
+#max_meta_overall_size = 4096
+
+
+# max_object_name_length is the max number of bytes in the utf8 encoding of an
+# object name: Gluster FS can handle much longer file names, but the length
+# between the slashes of the URL is handled below. Remember that most web
+# clients can't handle anything greater than 2048, and those that do are
+# rather clumsy.
+
+max_object_name_length = 2048
+
+# max_object_name_component_length (GlusterFS) is the max number of bytes in
+# the utf8 encoding of an object name component (the part between the
+# slashes); this is a limit imposed by the underlying file system (for XFS it
+# is 255 bytes).
+
+max_object_name_component_length = 255
+
+# container_listing_limit is the default (and max) number of items
+# returned for a container listing request
+
+#container_listing_limit = 10000
+
+
+# account_listing_limit is the default (and max) number of items returned
+# for an account listing request
+
+#account_listing_limit = 10000
+
+
+# max_account_name_length is the max number of bytes in the utf8 encoding of
+# an account name: Gluster FS Filename limit (XFS limit?), must be the same
+# size as max_object_name_component_length above.
+
+max_account_name_length = 255
+
+
+# max_container_name_length is the max number of bytes in the utf8 encoding
+# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
+# size as max_object_name_component_length above.
+
+max_container_name_length = 255
diff --git a/ufo/gluster-swift-ufo.spec b/ufo/gluster-swift-ufo.spec
new file mode 100644
index 0000000..8c0167c
--- /dev/null
+++ b/ufo/gluster-swift-ufo.spec
@@ -0,0 +1,79 @@
+############################################################################
+# Command to build rpms:
+#   $ rpmbuild -ta %{name}-%{version}-%{release}.tar.gz
+############################################################################
+# Setting up the environment:
+#   * Create a directory %{name}-%{version} under $HOME/rpmbuild/SOURCES
+#   * Copy the contents of the gluster directory into
+#     $HOME/rpmbuild/SOURCES/%{name}-%{version}
+#   * tar zcvf %{name}-%{version}-%{release}.tar.gz \
+#       $HOME/rpmbuild/SOURCES/%{name}-%{version} %{name}.spec
+# For more information refer to
+#   http://fedoraproject.org/wiki/How_to_create_an_RPM_package
+############################################################################
+
+%if ! (0%{?fedora} > 12 || 0%{?rhel} > 5)
+%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
+%endif
+
+%define _confdir %{_sysconfdir}/swift
+%define _ufo_version 1.1
+%define _ufo_release 4
+
+Summary : GlusterFS Unified File and Object Storage.
+Name : gluster-swift-ufo
+Version : %{_ufo_version}
+Release : %{_ufo_release}
+Group : Application/File
+Vendor : Red Hat Inc.
+Source0 : %{name}-%{version}-%{release}.tar.gz
+Packager : gluster-users@gluster.org
+License : Apache
+BuildArch: noarch
+Requires : memcached
+Requires : openssl
+Requires : python
+Requires : openstack-swift >= 1.4.8
+Requires : openstack-swift-account >= 1.4.8
+Requires : openstack-swift-container >= 1.4.8
+Requires : openstack-swift-object >= 1.4.8
+Requires : openstack-swift-proxy >= 1.4.8
+Obsoletes: gluster-swift
+Obsoletes: gluster-swift-plugin
+
+%description
+Gluster Unified File and Object Storage unifies NAS and object storage
+technology. This provides a system for data storage that enables users to access
+the same data as an object and as a file, simplifying management and controlling
+storage costs.
+
+%prep
+%setup -q
+
+%build
+%{__python} setup.py build
+
+%install
+rm -rf %{buildroot}
+
+%{__python} setup.py install -O1 --skip-build --root %{buildroot}
+
+mkdir -p %{buildroot}/%{_confdir}/
+cp -r etc/* %{buildroot}/%{_confdir}/
+
+mkdir -p %{buildroot}/%{_bindir}/
+cp bin/gluster-swift-gen-builders %{buildroot}/%{_bindir}/
+
+%clean
+rm -rf %{buildroot}
+
+%files
+%defattr(-,root,root)
+%{python_sitelib}/gluster
+%{python_sitelib}/gluster_swift_ufo-%{version}-*.egg-info
+%{_bindir}/gluster-swift-gen-builders
+%dir %{_confdir}
+%config %{_confdir}/account-server/1.conf-gluster
+%config %{_confdir}/container-server/1.conf-gluster
+%config %{_confdir}/object-server/1.conf-gluster
+%config %{_confdir}/swift.conf-gluster
+%config %{_confdir}/proxy-server.conf-gluster
+%config %{_confdir}/fs.conf-gluster
diff --git a/ufo/gluster/__init__.py b/ufo/gluster/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ufo/gluster/__init__.py
diff --git a/ufo/gluster/swift/__init__.py b/ufo/gluster/swift/__init__.py
new file mode 100644
index 0000000..ef350fd
--- /dev/null
+++ b/ufo/gluster/swift/__init__.py
@@ -0,0 +1,18 @@
+""" Gluster Swift UFO """
+
+class Version(object):
+ def __init__(self, canonical_version, final):
+ self.canonical_version = canonical_version
+ self.final = final
+
+ @property
+ def pretty_version(self):
+ if self.final:
+ return self.canonical_version
+ else:
+ return '%s-dev' % (self.canonical_version,)
+
+
+_version = Version('1.1', False)
+__version__ = _version.pretty_version
+__canonical_version__ = _version.canonical_version
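+
+# With final=False above, __version__ evaluates to '1.1-dev' while
+# __canonical_version__ remains '1.1'.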
diff --git a/ufo/gluster/swift/account/__init__.py b/ufo/gluster/swift/account/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ufo/gluster/swift/account/__init__.py
diff --git a/ufo/gluster/swift/account/server.py b/ufo/gluster/swift/account/server.py
new file mode 100644
index 0000000..aeaabc9
--- /dev/null
+++ b/ufo/gluster/swift/account/server.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Account Server for Gluster Swift UFO """
+
+# Simply importing this monkey patches the constraint handling to fit our
+# needs
+import gluster.swift.common.constraints
+
+from swift.account import server
+from gluster.swift.common.DiskDir import DiskAccount
+
+
+class AccountController(server.AccountController):
+ def _get_account_broker(self, drive, part, account):
+ """
+        Overridden to provide the GlusterFS specific broker that talks to
+ Gluster for the information related to servicing a given request
+ instead of talking to a database.
+
+ :param drive: drive that holds the container
+ :param part: partition the container is in
+ :param account: account name
+ :returns: DiskDir object
+ """
+ return DiskAccount(self.root, drive, account, self.logger)
+
+
+def app_factory(global_conf, **local_conf):
+ """paste.deploy app factory for creating WSGI account server apps."""
+ conf = global_conf.copy()
+ conf.update(local_conf)
+ return AccountController(conf)
diff --git a/ufo/gluster/swift/common/DiskDir.py b/ufo/gluster/swift/common/DiskDir.py
new file mode 100644
index 0000000..18d08cc
--- /dev/null
+++ b/ufo/gluster/swift/common/DiskDir.py
@@ -0,0 +1,496 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, errno
+
+from gluster.swift.common.utils import clean_metadata, dir_empty, rmdirs, \
+ mkdirs, validate_account, validate_container, is_marker, \
+ get_container_details, get_account_details, get_container_metadata, \
+ create_container_metadata, create_account_metadata, DEFAULT_GID, \
+ DEFAULT_UID, validate_object, create_object_metadata, read_metadata, \
+ write_metadata, X_CONTENT_TYPE, X_CONTENT_LENGTH, X_TIMESTAMP, \
+ X_PUT_TIMESTAMP, X_TYPE, X_ETAG, X_OBJECTS_COUNT, X_BYTES_USED, \
+ X_CONTAINER_COUNT, CONTAINER, os_path
+from gluster.swift.common import Glusterfs
+
+from swift.common.constraints import CONTAINER_LISTING_LIMIT
+from swift.common.utils import normalize_timestamp, TRUE_VALUES
+
+
+DATADIR = 'containers'
+
+# Create a dummy db_file in /etc/swift
+_unittests_enabled = os.getenv('GLUSTER_UNIT_TEST_ENABLED', 'no')
+if _unittests_enabled in TRUE_VALUES:
+ _tmp_dir = '/tmp/gluster_unit_tests'
+ try:
+ os.mkdir(_tmp_dir)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ _db_file = os.path.join(_tmp_dir, 'db_file.db')
+else:
+ _db_file = '/etc/swift/db_file.db'
+if not os.path.exists(_db_file):
+ file(_db_file, 'w+')
+
+
+def _read_metadata(dd):
+ """ Filter read metadata so that it always returns a tuple that includes
+ some kind of timestamp. With 1.4.8 of the Swift integration the
+ timestamps were not stored. Here we fabricate timestamps for volumes
+ where the existing data has no timestamp (that is, stored data is not
+ a tuple), allowing us a measure of backward compatibility.
+
+ FIXME: At this time it does not appear that the timestamps on each
+ metadata are used for much, so this should not hurt anything.
+ """
+ metadata_i = read_metadata(dd)
+ metadata = {}
+ timestamp = 0
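+    # e.g. a legacy stored value of 3 is returned as the tuple (3, 0)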
+ for key, value in metadata_i.iteritems():
+ if not isinstance(value, tuple):
+ value = (value, timestamp)
+ metadata[key] = value
+ return metadata
+
+
+class DiskCommon(object):
+ def is_deleted(self):
+ return not os_path.exists(self.datadir)
+
+ def filter_prefix(self, objects, prefix):
+ """
+ Accept sorted list.
+ """
+ found = 0
+ filtered_objs = []
+ for object_name in objects:
+ if object_name.startswith(prefix):
+ filtered_objs.append(object_name)
+ found = 1
+ else:
+ if found:
+ break
+ return filtered_objs
+
+ def filter_delimiter(self, objects, delimiter, prefix):
+ """
+ Accept sorted list.
+ Objects should start with prefix.
+ """
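+        # e.g. prefix='', delimiter='/': ['a/b', 'a/c', 'd'] -> ['a', 'd']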
+        filtered_objs = []
+        for object_name in objects:
+            tmp_obj = object_name.replace(prefix, '', 1)
+            suffix = tmp_obj.split(delimiter, 1)
+            new_obj = prefix + suffix[0]
+ if new_obj and new_obj not in filtered_objs:
+ filtered_objs.append(new_obj)
+
+ return filtered_objs
+
+ def filter_marker(self, objects, marker):
+ """
+ TODO: We can traverse in reverse order to optimize.
+ Accept sorted list.
+ """
+        filtered_objs = []
+ if objects[-1] < marker:
+ return filtered_objs
+ for object_name in objects:
+ if object_name > marker:
+ filtered_objs.append(object_name)
+
+ return filtered_objs
+
+ def filter_end_marker(self, objects, end_marker):
+ """
+ Accept sorted list.
+ """
+        filtered_objs = []
+ for object_name in objects:
+ if object_name < end_marker:
+ filtered_objs.append(object_name)
+ else:
+ break
+
+ return filtered_objs
+
+ def filter_limit(self, objects, limit):
+        return objects[:limit]
+
+
+class DiskDir(DiskCommon):
+ """
+ Manage object files on disk.
+
+ :param path: path to devices on the node
+ :param drive: gluster volume drive name
+ :param account: account name for the object
+ :param container: container name for the object
+ :param logger: account or container server logging object
+ :param uid: user ID container object should assume
+ :param gid: group ID container object should assume
+ """
+
+ def __init__(self, path, drive, account, container, logger,
+ uid=DEFAULT_UID, gid=DEFAULT_GID):
+ self.root = path
+ if container:
+ self.container = container
+ else:
+ self.container = None
+ if self.container:
+ self.datadir = os.path.join(path, drive, self.container)
+ else:
+ self.datadir = os.path.join(path, drive)
+ self.account = account
+ assert logger is not None
+ self.logger = logger
+ self.metadata = {}
+ self.container_info = None
+ self.object_info = None
+ self.uid = int(uid)
+ self.gid = int(gid)
+ self.db_file = _db_file
+ self.dir_exists = os_path.exists(self.datadir)
+ if self.dir_exists:
+ try:
+ self.metadata = _read_metadata(self.datadir)
+ except EOFError:
+ create_container_metadata(self.datadir)
+ else:
+ return
+ if self.container:
+ if not self.metadata:
+ create_container_metadata(self.datadir)
+ self.metadata = _read_metadata(self.datadir)
+ else:
+ if not validate_container(self.metadata):
+ create_container_metadata(self.datadir)
+ self.metadata = _read_metadata(self.datadir)
+ else:
+ if not self.metadata:
+ create_account_metadata(self.datadir)
+ self.metadata = _read_metadata(self.datadir)
+ else:
+ if not validate_account(self.metadata):
+ create_account_metadata(self.datadir)
+ self.metadata = _read_metadata(self.datadir)
+
+ def empty(self):
+ return dir_empty(self.datadir)
+
+ def delete(self):
+ if self.empty():
+ #For delete account.
+ if os_path.ismount(self.datadir):
+ clean_metadata(self.datadir)
+ else:
+ rmdirs(self.datadir)
+ self.dir_exists = False
+
+ def put_metadata(self, metadata):
+ """
+ Write metadata to directory/container.
+ """
+ write_metadata(self.datadir, metadata)
+ self.metadata = metadata
+
+ def put(self, metadata):
+ """
+        Create and write metadata to directory/container.
+ :param metadata: Metadata to write.
+ """
+ if not self.dir_exists:
+ mkdirs(self.datadir)
+
+ os.chown(self.datadir, self.uid, self.gid)
+ write_metadata(self.datadir, metadata)
+ self.metadata = metadata
+ self.dir_exists = True
+
+ def put_obj(self, content_length, timestamp):
+ ocnt = self.metadata[X_OBJECTS_COUNT][0]
+ self.metadata[X_OBJECTS_COUNT] = (int(ocnt) + 1, timestamp)
+ self.metadata[X_PUT_TIMESTAMP] = timestamp
+ bused = self.metadata[X_BYTES_USED][0]
+ self.metadata[X_BYTES_USED] = (int(bused) + int(content_length), timestamp)
+        #TODO: define update_metadata instead of writing whole metadata again.
+ self.put_metadata(self.metadata)
+
+ def delete_obj(self, content_length):
+        ocnt, timestamp = self.metadata[X_OBJECTS_COUNT]
+ self.metadata[X_OBJECTS_COUNT] = (int(ocnt) - 1, timestamp)
+ bused, timestamp = self.metadata[X_BYTES_USED]
+ self.metadata[X_BYTES_USED] = (int(bused) - int(content_length), timestamp)
+ self.put_metadata(self.metadata)
+
+ def put_container(self, container, put_timestamp, del_timestamp, object_count, bytes_used):
+ """
+ For account server.
+ """
+ self.metadata[X_OBJECTS_COUNT] = (0, put_timestamp)
+ self.metadata[X_BYTES_USED] = (0, put_timestamp)
+ ccnt = self.metadata[X_CONTAINER_COUNT][0]
+ self.metadata[X_CONTAINER_COUNT] = (int(ccnt) + 1, put_timestamp)
+ self.metadata[X_PUT_TIMESTAMP] = (1, put_timestamp)
+ self.put_metadata(self.metadata)
+
+ def delete_container(self, object_count, bytes_used):
+ """
+ For account server.
+ """
+ self.metadata[X_OBJECTS_COUNT] = (0, 0)
+ self.metadata[X_BYTES_USED] = (0, 0)
+ ccnt, timestamp = self.metadata[X_CONTAINER_COUNT]
+ self.metadata[X_CONTAINER_COUNT] = (int(ccnt) - 1, timestamp)
+ self.put_metadata(self.metadata)
+
+ def unlink(self):
+ """
+ Remove directory/container if empty.
+ """
+ if dir_empty(self.datadir):
+ rmdirs(self.datadir)
+
+ def list_objects_iter(self, limit, marker, end_marker,
+ prefix, delimiter, path):
+ """
+ Returns tuple of name, created_at, size, content_type, etag.
+ """
+ if path:
+ prefix = path = path.rstrip('/') + '/'
+ delimiter = '/'
+ if delimiter and not prefix:
+ prefix = ''
+
+ self.update_object_count()
+
+ objects, object_count, bytes_used = self.object_info
+
+ if objects and prefix:
+ objects = self.filter_prefix(objects, prefix)
+
+ if objects and delimiter:
+ objects = self.filter_delimiter(objects, delimiter, prefix)
+
+ if objects and marker:
+ objects = self.filter_marker(objects, marker)
+
+ if objects and end_marker:
+ objects = self.filter_end_marker(objects, end_marker)
+
+ if objects and limit:
+ if len(objects) > limit:
+ objects = self.filter_limit(objects, limit)
+
+ container_list = []
+ if objects:
+ for obj in objects:
+ list_item = []
+ list_item.append(obj)
+ obj_path = os.path.join(self.datadir, obj)
+ metadata = read_metadata(obj_path)
+ if not metadata or not validate_object(metadata):
+ metadata = create_object_metadata(obj_path)
+ if metadata:
+ list_item.append(metadata[X_TIMESTAMP])
+ list_item.append(int(metadata[X_CONTENT_LENGTH]))
+ list_item.append(metadata[X_CONTENT_TYPE])
+ list_item.append(metadata[X_ETAG])
+ container_list.append(list_item)
+
+ return container_list
+
+ def update_object_count(self):
+ if not self.object_info:
+ self.object_info = get_container_details(self.datadir)
+
+ objects, object_count, bytes_used = self.object_info
+
+ if X_OBJECTS_COUNT not in self.metadata \
+ or int(self.metadata[X_OBJECTS_COUNT][0]) != object_count \
+ or X_BYTES_USED not in self.metadata \
+ or int(self.metadata[X_BYTES_USED][0]) != bytes_used:
+ self.metadata[X_OBJECTS_COUNT] = (object_count, 0)
+ self.metadata[X_BYTES_USED] = (bytes_used, 0)
+ write_metadata(self.datadir, self.metadata)
+
+ def update_container_count(self):
+ if not self.container_info:
+ self.container_info = get_account_details(self.datadir)
+
+ containers, container_count = self.container_info
+
+ if X_CONTAINER_COUNT not in self.metadata \
+ or int(self.metadata[X_CONTAINER_COUNT][0]) != container_count:
+ self.metadata[X_CONTAINER_COUNT] = (container_count, 0)
+ write_metadata(self.datadir, self.metadata)
+
+ def get_info(self, include_metadata=False):
+ """
+ Get global data for the container.
+ :returns: dict with keys: account, container, object_count, bytes_used,
+ hash, id, created_at, put_timestamp, delete_timestamp,
+ reported_put_timestamp, reported_delete_timestamp,
+ reported_object_count, and reported_bytes_used.
+ If include_metadata is set, metadata is included as a key
+ pointing to a dict of tuples of the metadata
+ """
+ # TODO: delete_timestamp, reported_put_timestamp
+ # reported_delete_timestamp, reported_object_count,
+ # reported_bytes_used, created_at
+ if not Glusterfs.OBJECT_ONLY:
+ # If we are not configured for object only environments, we should
+ # update the object counts in case they changed behind our back.
+ self.update_object_count()
+
+ data = {'account' : self.account, 'container' : self.container,
+ 'object_count' : self.metadata.get(X_OBJECTS_COUNT, ('0', 0))[0],
+ 'bytes_used' : self.metadata.get(X_BYTES_USED, ('0',0))[0],
+ 'hash': '', 'id' : '', 'created_at' : '1',
+ 'put_timestamp' : self.metadata.get(X_PUT_TIMESTAMP, ('0',0))[0],
+ 'delete_timestamp' : '1',
+ 'reported_put_timestamp' : '1', 'reported_delete_timestamp' : '1',
+ 'reported_object_count' : '1', 'reported_bytes_used' : '1'}
+ if include_metadata:
+ data['metadata'] = self.metadata
+ return data
+
+ def put_object(self, name, timestamp, size, content_type,
+ etag, deleted=0):
+ # TODO: Implement the specifics of this func.
+ pass
+
+ def initialize(self, timestamp):
+ pass
+
+ def update_put_timestamp(self, timestamp):
+ """
+ Create the container if it doesn't exist and update the timestamp
+ """
+ if not os_path.exists(self.datadir):
+ self.put(self.metadata)
+
+ def delete_object(self, name, timestamp):
+ # TODO: Implement the delete object
+ pass
+
+ def delete_db(self, timestamp):
+ """
+ Delete the container
+ """
+ self.unlink()
+
+ def update_metadata(self, metadata):
+ assert self.metadata, "Valid container/account metadata should have been created by now"
+ if metadata:
+ new_metadata = self.metadata.copy()
+ new_metadata.update(metadata)
+ if new_metadata != self.metadata:
+ write_metadata(self.datadir, new_metadata)
+ self.metadata = new_metadata
+
+
+class DiskAccount(DiskDir):
+ def __init__(self, root, drive, account, logger):
+ super(DiskAccount, self).__init__(root, drive, account, None, logger)
+ assert self.dir_exists
+
+ def list_containers_iter(self, limit, marker, end_marker,
+ prefix, delimiter):
+ """
+ Return tuple of name, object_count, bytes_used, 0(is_subdir).
+ Used by account server.
+ """
+ if delimiter and not prefix:
+ prefix = ''
+
+ self.update_container_count()
+
+ containers, container_count = self.container_info
+
+ if containers:
+ containers.sort()
+
+ if containers and prefix:
+ containers = self.filter_prefix(containers, prefix)
+
+ if containers and delimiter:
+ containers = self.filter_delimiter(containers, delimiter, prefix)
+
+ if containers and marker:
+ containers = self.filter_marker(containers, marker)
+
+ if containers and end_marker:
+ containers = self.filter_end_marker(containers, end_marker)
+
+ if containers and limit:
+ if len(containers) > limit:
+ containers = self.filter_limit(containers, limit)
+
+ account_list = []
+ if containers:
+ for cont in containers:
+ list_item = []
+ metadata = None
+ list_item.append(cont)
+ cont_path = os.path.join(self.datadir, cont)
+ metadata = _read_metadata(cont_path)
+ if not metadata or not validate_container(metadata):
+ metadata = create_container_metadata(cont_path)
+
+ if metadata:
+ list_item.append(metadata[X_OBJECTS_COUNT][0])
+ list_item.append(metadata[X_BYTES_USED][0])
+ list_item.append(0)
+ account_list.append(list_item)
+
+ return account_list
+
+ def get_info(self, include_metadata=False):
+ """
+ Get global data for the account.
+ :returns: dict with keys: account, created_at, put_timestamp,
+ delete_timestamp, container_count, object_count,
+ bytes_used, hash, id
+ """
+ if not Glusterfs.OBJECT_ONLY:
+ # If we are not configured for object only environments, we should
+ # update the container counts in case they changed behind our back.
+ self.update_container_count()
+
+ data = {'account' : self.account, 'created_at' : '1',
+ 'put_timestamp' : '1', 'delete_timestamp' : '1',
+ 'container_count' : self.metadata.get(X_CONTAINER_COUNT, (0,0))[0],
+ 'object_count' : self.metadata.get(X_OBJECTS_COUNT, (0,0))[0],
+ 'bytes_used' : self.metadata.get(X_BYTES_USED, (0,0))[0],
+ 'hash' : '', 'id' : ''}
+
+ if include_metadata:
+ data['metadata'] = self.metadata
+ return data
+
+ def get_container_timestamp(self, container):
+ cont_path = os.path.join(self.datadir, container)
+ metadata = read_metadata(cont_path)
+
+ return int(metadata.get(X_PUT_TIMESTAMP, ('0',0))[0]) or None
diff --git a/ufo/gluster/swift/common/DiskFile.py b/ufo/gluster/swift/common/DiskFile.py
new file mode 100644
index 0000000..900bd49
--- /dev/null
+++ b/ufo/gluster/swift/common/DiskFile.py
@@ -0,0 +1,338 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import errno
+import random
+from hashlib import md5
+from contextlib import contextmanager
+from swift.common.utils import normalize_timestamp, renamer
+from swift.common.exceptions import DiskFileNotExist
+from gluster.swift.common.exceptions import AlreadyExistsAsDir
+from gluster.swift.common.utils import mkdirs, rmdirs, validate_object, \
+ create_object_metadata, do_open, do_close, do_unlink, do_chown, \
+ do_listdir, read_metadata, write_metadata, os_path, do_fsync
+from gluster.swift.common.utils import X_CONTENT_TYPE, X_CONTENT_LENGTH, \
+ X_TIMESTAMP, X_PUT_TIMESTAMP, X_TYPE, X_ETAG, X_OBJECTS_COUNT, \
+ X_BYTES_USED, X_OBJECT_TYPE, FILE, DIR, MARKER_DIR, OBJECT, DIR_TYPE, \
+ FILE_TYPE, DEFAULT_UID, DEFAULT_GID
+
+import logging
+from swift.obj.server import DiskFile
+
+
+DEFAULT_DISK_CHUNK_SIZE = 65536
+# keep these lower-case
+DISALLOWED_HEADERS = set('content-length content-type deleted etag'.split())
+
+
+def _adjust_metadata(metadata):
+ # Fix up the metadata to ensure it has a proper value for the
+ # Content-Type metadata, as well as an X_TYPE and X_OBJECT_TYPE
+ # metadata values.
+ content_type = metadata['Content-Type']
+ if not content_type:
+ # FIXME: How can this be that our caller supplied us with metadata
+ # that has a content type that evaluates to False?
+ #
+ # FIXME: If the file exists, we would already know it is a
+ # directory. So why are we assuming it is a file object?
+ metadata['Content-Type'] = FILE_TYPE
+ x_object_type = FILE
+ else:
+ x_object_type = MARKER_DIR if content_type.lower() == DIR_TYPE else FILE
+ metadata[X_TYPE] = OBJECT
+ metadata[X_OBJECT_TYPE] = x_object_type
+ return metadata
+
+
+class Gluster_DiskFile(DiskFile):
+ """
+ Manage object files on disk.
+
+ :param path: path to devices on the node/mount path for UFO.
+ :param device: device name/account_name for UFO.
+ :param partition: partition on the device the object lives in
+ :param account: account name for the object
+ :param container: container name for the object
+ :param obj: object name for the object
+ :param logger: logger object for writing out log file messages
+ :param keep_data_fp: if True, don't close the fp, otherwise close it
+    :param disk_chunk_size: size of chunks on file reads
+ :param uid: user ID disk object should assume (file or directory)
+ :param gid: group ID disk object should assume (file or directory)
+ """
+
+ def __init__(self, path, device, partition, account, container, obj,
+ logger, keep_data_fp=False,
+ disk_chunk_size=DEFAULT_DISK_CHUNK_SIZE,
+ uid=DEFAULT_UID, gid=DEFAULT_GID, iter_hook=None):
+ self.disk_chunk_size = disk_chunk_size
+ self.iter_hook = iter_hook
+        # Don't support obj_name ending/beginning with '/', like /a, a/,
+        # /a/b/, etc.
+ obj = obj.strip(os.path.sep)
+ if os.path.sep in obj:
+ self._obj_path, self._obj = os.path.split(obj)
+ else:
+ self._obj_path = ''
+ self._obj = obj
+
+ if self._obj_path:
+ self.name = os.path.join(container, self._obj_path)
+ else:
+ self.name = container
+ # Absolute path for object directory.
+ self.datadir = os.path.join(path, device, self.name)
+ self.device_path = os.path.join(path, device)
+ self._container_path = os.path.join(path, device, container)
+ self._is_dir = False
+ self.tmppath = None
+ self.logger = logger
+ self.metadata = {}
+ self.meta_file = None
+ self.fp = None
+ self.iter_etag = None
+ self.started_at_0 = False
+ self.read_to_eof = False
+ self.quarantined_dir = None
+ self.keep_cache = False
+ self.uid = int(uid)
+ self.gid = int(gid)
+
+ # Don't store a value for data_file until we know it exists.
+ self.data_file = None
+ data_file = os.path.join(self.datadir, self._obj)
+ if not os_path.exists(data_file):
+ return
+
+        self.data_file = data_file
+ self.metadata = read_metadata(data_file)
+ if not self.metadata:
+ create_object_metadata(data_file)
+ self.metadata = read_metadata(data_file)
+
+ if not validate_object(self.metadata):
+ create_object_metadata(data_file)
+ self.metadata = read_metadata(data_file)
+
+ self.filter_metadata()
+
+ if os_path.isdir(data_file):
+ self._is_dir = True
+ else:
+ if keep_data_fp:
+ # The caller has an assumption that the "fp" field of this
+ # object is an file object if keep_data_fp is set. However,
+ # this implementation of the DiskFile object does not need to
+ # open the file for internal operations. So if the caller
+ # requests it, we'll just open the file for them.
+ self.fp = do_open(data_file, 'rb')
+
+ def close(self, verify_file=True):
+ """
+ Close the file. Will handle quarantining file if necessary.
+
+ :param verify_file: Defaults to True. If false, will not check
+ file to see if it needs quarantining.
+ """
+ #Marker directory
+ if self._is_dir:
+ return
+ if self.fp:
+ do_close(self.fp)
+ self.fp = None
+
+ def is_deleted(self):
+ """
+ Check if the file is deleted.
+
+ :returns: True if the file doesn't exist or has been flagged as
+ deleted.
+ """
+ return not self.data_file
+
+ def _create_dir_object(self, dir_path):
+ #TODO: if object already exists???
+ if os_path.exists(dir_path) and not os_path.isdir(dir_path):
+ self.logger.error("Deleting file %s", dir_path)
+ do_unlink(dir_path)
+        #If dir already exists, just override the metadata.
+ mkdirs(dir_path)
+ do_chown(dir_path, self.uid, self.gid)
+ create_object_metadata(dir_path)
+
+ def put_metadata(self, metadata, tombstone=False):
+ """
+        Shorthand for putting metadata to .meta and .ts files.
+
+ :param metadata: dictionary of metadata to be written
+ :param tombstone: whether or not we are writing a tombstone
+ """
+ if tombstone:
+ # We don't write tombstone files. So do nothing.
+ return
+ assert self.data_file is not None, "put_metadata: no file to put metadata into"
+ metadata = _adjust_metadata(metadata)
+ write_metadata(self.data_file, metadata)
+ self.metadata = metadata
+ self.filter_metadata()
+
+ def put(self, fd, metadata, extension='.data'):
+ """
+ Finalize writing the file on disk, and renames it from the temp file to
+ the real location. This should be called after the data has been
+ written to the temp file.
+
+ :param fd: file descriptor of the temp file
+ :param metadata: dictionary of metadata to be written
+ :param extension: extension to be used when making the file
+ """
+ # Our caller will use '.data' here; we just ignore it since we map the
+ # URL directly to the file system.
+ extension = ''
+
+ metadata = _adjust_metadata(metadata)
+
+ if metadata[X_OBJECT_TYPE] == MARKER_DIR:
+ if not self.data_file:
+ self.data_file = os.path.join(self.datadir, self._obj)
+ self._create_dir_object(self.data_file)
+ self.put_metadata(metadata)
+ return
+
+ # Check if directory already exists.
+ if self._is_dir:
+ # FIXME: How can we have a directory and it not be marked as a
+ # MARKER_DIR (see above)?
+ msg = 'File object exists as a directory: %s' % self.data_file
+ raise AlreadyExistsAsDir(msg)
+
+ timestamp = normalize_timestamp(metadata[X_TIMESTAMP])
+ write_metadata(self.tmppath, metadata)
+ if X_CONTENT_LENGTH in metadata:
+ self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
+ do_fsync(fd)
+ if self._obj_path:
+ dir_objs = self._obj_path.split('/')
+ assert len(dir_objs) >= 1
+ tmp_path = self._container_path
+ for dir_name in dir_objs:
+ tmp_path = os.path.join(tmp_path, dir_name)
+ self._create_dir_object(tmp_path)
+
+ newpath = os.path.join(self.datadir, self._obj)
+ renamer(self.tmppath, newpath)
+ do_chown(newpath, self.uid, self.gid)
+ self.metadata = metadata
+ self.data_file = newpath
+ self.filter_metadata()
+ return
+
+ def unlinkold(self, timestamp):
+ """
+ Remove any older versions of the object file. Any file that has an
+ older timestamp than timestamp will be deleted.
+
+ :param timestamp: timestamp to compare with each file
+ """
+ if not self.metadata or self.metadata['X-Timestamp'] >= timestamp:
+ return
+
+ assert self.data_file, \
+ "Have metadata, %r, but no data_file" % self.metadata
+
+ if self._is_dir:
+ # Marker directory object
+ if not rmdirs(self.data_file):
+ logging.error('Unable to delete dir object: %s', self.data_file)
+ return
+ else:
+ # File object
+ do_unlink(self.data_file)
+
+ self.metadata = {}
+ self.data_file = None
+
+ def get_data_file_size(self):
+ """
+ Returns the os_path.getsize for the file. Raises an exception if this
+ file does not match the Content-Length stored in the metadata. Or if
+ self.data_file does not exist.
+
+ :returns: file size as an int
+ :raises DiskFileError: on file size mismatch.
+ :raises DiskFileNotExist: on file not existing (including deleted)
+ """
+ #Marker directory.
+ if self._is_dir:
+ return 0
+ try:
+ file_size = 0
+ if self.data_file:
+ file_size = os_path.getsize(self.data_file)
+ if X_CONTENT_LENGTH in self.metadata:
+ metadata_size = int(self.metadata[X_CONTENT_LENGTH])
+ if file_size != metadata_size:
+ self.metadata[X_CONTENT_LENGTH] = file_size
+ write_metadata(self.data_file, self.metadata)
+
+ return file_size
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ raise DiskFileNotExist('Data File does not exist.')
+
+ def filter_metadata(self):
+ if X_TYPE in self.metadata:
+ self.metadata.pop(X_TYPE)
+ if X_OBJECT_TYPE in self.metadata:
+ self.metadata.pop(X_OBJECT_TYPE)
+
+ @contextmanager
+ def mkstemp(self):
+ """Contextmanager to make a temporary file."""
+
+        # Create intermediate directories and corresponding metadata.
+        # As an optimization, check whether the subdirectory already exists;
+        # if it does, it already has its metadata as well.
+        # Not checking for the container, since the container should already
+        # exist for the call to come here.
+ if not os_path.exists(self.datadir):
+ path = self._container_path
+            for subdir in self._obj_path.split(os.path.sep):
+                path = os.path.join(path, subdir)
+ if not os_path.exists(path):
+ self._create_dir_object(path)
+
+ tmpfile = '.' + self._obj + '.' + md5(self._obj + \
+ str(random.random())).hexdigest()
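+        # e.g. object "o" yields a temp name like ".o." plus a 32-character
+        # md5 hex digest, effectively unique per concurrent writer.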
+
+ self.tmppath = os.path.join(self.datadir, tmpfile)
+ fd = do_open(self.tmppath, os.O_RDWR | os.O_CREAT | os.O_EXCL)
+ try:
+ yield fd
+ finally:
+ try:
+ do_close(fd)
+ except OSError:
+ pass
+ tmppath, self.tmppath = self.tmppath, None
+ try:
+ do_unlink(tmppath)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
diff --git a/ufo/gluster/swift/common/Glusterfs.py b/ufo/gluster/swift/common/Glusterfs.py
new file mode 100644
index 0000000..a0c8126
--- /dev/null
+++ b/ufo/gluster/swift/common/Glusterfs.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os, sys, fcntl, time, errno
+from ConfigParser import ConfigParser, NoSectionError, NoOptionError
+from swift.common.utils import TRUE_VALUES, search_tree
+from gluster.swift.common.fs_utils import mkdirs
+
+#
+# Read the fs.conf file once at startup (module load)
+#
+_fs_conf = ConfigParser()
+MOUNT_IP = 'localhost'
+OBJECT_ONLY = False
+RUN_DIR = '/var/run/swift'
+SWIFT_DIR = '/etc/swift'
+_do_getsize = False
+if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
+    # ConfigParser.get() takes no default argument; the module-level
+    # assignments above serve as the defaults when an option is missing.
+    try:
+        MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip')
+    except (NoSectionError, NoOptionError):
+        pass
+    try:
+        OBJECT_ONLY = _fs_conf.get('DEFAULT', 'object_only') in TRUE_VALUES
+    except (NoSectionError, NoOptionError):
+        pass
+    try:
+        RUN_DIR = _fs_conf.get('DEFAULT', 'run_dir')
+    except (NoSectionError, NoOptionError):
+        pass
+    try:
+        _do_getsize = _fs_conf.get('DEFAULT',
+                                   'accurate_size_in_listing') in TRUE_VALUES
+    except (NoSectionError, NoOptionError):
+        pass
+
+NAME = 'glusterfs'
+
+
+def _busy_wait(full_mount_path):
+    # Poll for the mount a fixed number of times, sleeping between
+    # attempts, and report whether it appeared.
+ for i in range(0, 5):
+        if os.path.ismount(full_mount_path):
+ return True
+ time.sleep(2)
+ logging.error('Busy wait for mount timed out for mount %s', full_mount_path)
+ return False
+
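+# mount() serializes concurrent mount attempts for a volume using an
+# exclusive, non-blocking lock on RUN_DIR/<volume>.lock; a process that
+# loses the race polls via _busy_wait() until the winner's mount appears.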
+def mount(root, drive):
+ # FIXME: Possible thundering herd problem here
+
+ el = _get_export_list()
+ for export in el:
+ if drive == export:
+ break
+ else:
+ logging.error('No export found in %r matching drive, %s', el, drive)
+ return False
+
+ # NOTE: root is typically the default value of /mnt/gluster-object
+ full_mount_path = os.path.join(root, drive)
+ if not os.path.isdir(full_mount_path):
+ mkdirs(full_mount_path)
+
+    lck_file = os.path.join(RUN_DIR, '%s.lock' % drive)
+
+ if not os.path.exists(RUN_DIR):
+ mkdirs(RUN_DIR)
+
+ fd = os.open(lck_file, os.O_CREAT|os.O_RDWR)
+ with os.fdopen(fd, 'r+b') as f:
+ try:
+ fcntl.lockf(f, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except IOError as ex:
+ if ex.errno in (errno.EACCES, errno.EAGAIN):
+ # This means that some other process is mounting the
+ # filesystem, so wait for the mount process to complete
+ return _busy_wait(full_mount_path)
+ else:
+ raise ex
+ mnt_cmd = 'mount -t glusterfs %s:%s %s' % (MOUNT_IP, export, \
+ full_mount_path)
+ if os.system(mnt_cmd) or not _busy_wait(full_mount_path):
+ logging.error('Mount failed %s: %s', NAME, mnt_cmd)
+ return False
+ return True
+
+def unmount(full_mount_path):
+ # FIXME: Possible thundering herd problem here
+
+    umnt_cmd = 'umount %s 2> /dev/null' % full_mount_path
+    if os.system(umnt_cmd):
+        logging.error('Unable to unmount %s %s', full_mount_path, NAME)
+
+def _get_export_list():
+ cmnd = 'gluster --remote-host=%s volume info' % MOUNT_IP
+
+ export_list = []
+
+ if os.system(cmnd + ' >> /dev/null'):
+ logging.error('Getting volume info failed for %s', NAME)
+ else:
+ fp = os.popen(cmnd)
+ while True:
+ item = fp.readline()
+ if not item:
+ break
+ item = item.strip('\n').strip(' ')
+ if item.lower().startswith('volume name:'):
+ export_list.append(item.split(':')[1].strip(' '))
+
+ return export_list
+
+def get_mnt_point(vol_name, conf_dir=SWIFT_DIR, conf_file="object-server*"):
+ """Read the object-server's configuration file and return
+ the device value"""
+
+ mnt_dir = ''
+ conf_files = search_tree(conf_dir, conf_file, '.conf')
+ if not conf_files:
+ raise Exception("Config file not found")
+
+    _conf = ConfigParser()
+    if _conf.read(conf_files[0]):
+        mnt_dir = _conf.get('DEFAULT', 'devices')
+        return os.path.join(mnt_dir, vol_name)
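+
+# Example: with "devices = /mnt/gluster-object" in the object-server
+# configuration, get_mnt_point('vol0') returns '/mnt/gluster-object/vol0'.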
diff --git a/ufo/gluster/swift/common/__init__.py b/ufo/gluster/swift/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ufo/gluster/swift/common/__init__.py
diff --git a/ufo/gluster/swift/common/constraints.py b/ufo/gluster/swift/common/constraints.py
new file mode 100644
index 0000000..11f626b
--- /dev/null
+++ b/ufo/gluster/swift/common/constraints.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+try:
+ from webob.exc import HTTPBadRequest
+except ImportError:
+ from swift.common.swob import HTTPBadRequest
+import swift.common.constraints
+import swift.common.ring as _ring
+from gluster.swift.common import Glusterfs, ring
+
+if hasattr(swift.common.constraints, 'constraints_conf_int'):
+ MAX_OBJECT_NAME_COMPONENT_LENGTH = \
+ swift.common.constraints.constraints_conf_int(
+ 'max_object_name_component_length', 255)
+else:
+ MAX_OBJECT_NAME_COMPONENT_LENGTH = 255
+
+def validate_obj_name_component(obj):
+ if len(obj) > MAX_OBJECT_NAME_COMPONENT_LENGTH:
+ return 'too long (%d)' % len(obj)
+ if obj == '.' or obj == '..':
+ return 'cannot be . or ..'
+ return ''
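+
+# e.g. validate_obj_name_component('a' * 256) returns 'too long (256)',
+# and validate_obj_name_component('..') returns 'cannot be . or ..'.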
+
+# Save the original check object creation
+__check_object_creation = swift.common.constraints.check_object_creation
+
+# Define our new one which invokes the original
+def gluster_check_object_creation(req, object_name):
+ """
+ Check to ensure that everything is alright about an object to be created.
+ Monkey patches swift.common.constraints.check_object_creation, invoking
+ the original, and then adding an additional check for individual object
+ name components.
+
+ :param req: HTTP request object
+ :param object_name: name of object to be created
+ :raises HTTPRequestEntityTooLarge: the object is too large
+    :raises HTTPLengthRequired: missing content-length header and not
+                                a chunked request
+ :raises HTTPBadRequest: missing or bad content-type header, or
+ bad metadata
+ """
+ ret = __check_object_creation(req, object_name)
+
+ if ret is None:
+ for obj in object_name.split('/'):
+ reason = validate_obj_name_component(obj)
+ if reason:
+ bdy = 'Invalid object name "%s", component "%s" %s' \
+ % (object_name, obj, reason)
+ ret = HTTPBadRequest(body=bdy,
+ request=req,
+ content_type='text/plain')
+
+ return ret
+
+# Replace the original check object creation with ours
+swift.common.constraints.check_object_creation = gluster_check_object_creation
+
+# Save the original check mount
+__check_mount = swift.common.constraints.check_mount
+
+# Define our new one which invokes the original
+def gluster_check_mount(root, drive):
+ # FIXME: Potential performance optimization here to not call the original
+ # check mount which makes two stat calls. We could do what they do with
+ # just one.
+ if __check_mount(root, drive):
+ return True
+
+ return Glusterfs.mount(root, drive)
+
+# Replace the original check mount with ours
+swift.common.constraints.check_mount = gluster_check_mount
+
+# Save the original Ring class
+__Ring = _ring.Ring
+
+# Replace the original Ring class
+_ring.Ring = ring.Ring
diff --git a/ufo/gluster/swift/common/exceptions.py b/ufo/gluster/swift/common/exceptions.py
new file mode 100644
index 0000000..d9357db
--- /dev/null
+++ b/ufo/gluster/swift/common/exceptions.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+class GlusterfsException(Exception):
+ pass
+
+class FileOrDirNotFoundError(GlusterfsException):
+ pass
+
+class NotDirectoryError(GlusterfsException):
+ pass
+
+class AlreadyExistsAsDir(GlusterfsException):
+ pass
+
diff --git a/ufo/gluster/swift/common/fs_utils.py b/ufo/gluster/swift/common/fs_utils.py
new file mode 100644
index 0000000..0613a26
--- /dev/null
+++ b/ufo/gluster/swift/common/fs_utils.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import errno
+import os.path as os_path
+from eventlet import tpool
+from gluster.swift.common.exceptions import FileOrDirNotFoundError, \
+ NotDirectoryError
+
+def do_walk(*args, **kwargs):
+ return os.walk(*args, **kwargs)
+
+def do_write(fd, msg):
+ try:
+ cnt = os.write(fd, msg)
+ except OSError as err:
+ logging.exception("Write failed, err: %s", str(err))
+ raise
+ return cnt
+
+def do_mkdir(path):
+ try:
+ os.mkdir(path)
+ except OSError as err:
+ if err.errno != errno.EEXIST:
+ logging.exception("Mkdir failed on %s err: %s", path, err.strerror)
+ raise
+ return True
+
+def do_makedirs(path):
+ try:
+ os.makedirs(path)
+ except OSError as err:
+ if err.errno != errno.EEXIST:
+ logging.exception("Makedirs failed on %s err: %s", path, err.strerror)
+ raise
+ return True
+
+def do_listdir(path):
+ try:
+ buf = os.listdir(path)
+ except OSError as err:
+ logging.exception("Listdir failed on %s err: %s", path, err.strerror)
+ raise
+ return buf
+
+def do_chown(path, uid, gid):
+ try:
+ os.chown(path, uid, gid)
+ except OSError as err:
+ logging.exception("Chown failed on %s err: %s", path, err.strerror)
+ raise
+ return True
+
+def do_stat(path):
+ try:
+        # An int argument is treated as an open file descriptor.
+ if isinstance(path, int):
+ buf = os.fstat(path)
+ else:
+ buf = os.stat(path)
+ except OSError as err:
+ logging.exception("Stat failed on %s err: %s", path, err.strerror)
+ raise
+ return buf
+
+def do_open(path, mode):
+ if isinstance(mode, int):
+ try:
+ fd = os.open(path, mode)
+ except OSError as err:
+ logging.exception("Open failed on %s err: %s", path, str(err))
+ raise
+ else:
+ try:
+ fd = open(path, mode)
+ except IOError as err:
+ logging.exception("Open failed on %s err: %s", path, str(err))
+ raise
+ return fd
+
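+# Note (editor's comment): do_open() accepts either style, e.g.
+# do_open(path, os.O_RDONLY) uses os.open() and returns an int fd, while
+# do_open(path, 'rb') uses the builtin open() and returns a file object;
+# do_close() below accepts either result.
+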
+def do_close(fd):
+    # fd may be a file object or an int file descriptor.
+ try:
+ if isinstance(fd, int):
+ os.close(fd)
+ else:
+ fd.close()
+ except OSError as err:
+ logging.exception("Close failed on %s err: %s", fd, err.strerror)
+ raise
+ return True
+
+def do_unlink(path, log=True):
+ try:
+ os.unlink(path)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ if log:
+ logging.exception("Unlink failed on %s err: %s", path, err.strerror)
+ raise
+ return True
+
+def do_rmdir(path):
+ try:
+ os.rmdir(path)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ logging.exception("Rmdir failed on %s err: %s", path, err.strerror)
+ raise
+ res = False
+ else:
+ res = True
+ return res
+
+def do_rename(old_path, new_path):
+ try:
+ os.rename(old_path, new_path)
+ except OSError as err:
+ logging.exception("Rename failed on %s to %s err: %s", old_path, new_path, \
+ err.strerror)
+ raise
+ return True
+
+def mkdirs(path):
+ """
+ Ensures the path is a directory or makes it if not. Errors if the path
+ exists but is a file or on permissions failure.
+
+ :param path: path to create
+ """
+ if not os.path.isdir(path):
+ do_makedirs(path)
+
+def dir_empty(path):
+ """
+ Return true if directory/container is empty.
+ :param path: Directory path.
+ :returns: True/False.
+ """
+ if os.path.isdir(path):
+ files = do_listdir(path)
+ return not files
+ elif not os.path.exists(path):
+ raise FileOrDirNotFoundError()
+ raise NotDirectoryError()
+
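+# Example (editor's sketch) of dir_empty()'s three outcomes, on hypothetical
+# paths:
+#
+#     dir_empty('/mnt/gluster-object/vol0/empty_dir')  -> True
+#     dir_empty('/mnt/gluster-object/vol0/missing')    -> raises FileOrDirNotFoundError
+#     dir_empty('/mnt/gluster-object/vol0/a_file')     -> raises NotDirectoryError
+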
+def rmdirs(path):
+ if not os.path.isdir(path):
+ return False
+ try:
+ os.rmdir(path)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ logging.error("rmdirs failed on %s, err: %s", path, err.strerror)
+ return False
+ return True
+
+def do_fsync(fd):
+ try:
+ tpool.execute(os.fsync, fd)
+ except OSError as err:
+ logging.exception("fsync failed with err: %s", err.strerror)
+ raise
+ return True
diff --git a/ufo/gluster/swift/common/middleware/__init__.py b/ufo/gluster/swift/common/middleware/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ufo/gluster/swift/common/middleware/__init__.py
diff --git a/ufo/gluster/swift/common/middleware/gluster.py b/ufo/gluster/swift/common/middleware/gluster.py
new file mode 100644
index 0000000..ab63c51
--- /dev/null
+++ b/ufo/gluster/swift/common/middleware/gluster.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# No-op middleware that simply allows us to monkey patch the constraints
+import gluster.swift.common.constraints
+
+class Gluster(object):
+ """
+    No-op middleware for the proxy server. Its only purpose is to get
+    paste.deploy to load this module, so that importing it monkey patches
+    the common constraints before they are used.
+ """
+ def __init__(self, app, conf):
+ self.app = app
+ self.conf = conf
+
+ def __call__(self, env, start_response):
+ return self.app(env, start_response)
+
+
+def filter_factory(global_conf, **local_conf):
+ """Returns a WSGI filter app for use with paste.deploy."""
+ conf = global_conf.copy()
+ conf.update(local_conf)
+
+ def gluster_filter(app):
+ return Gluster(app, conf)
+ return gluster_filter
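+
+# Example (editor's sketch): the filter is enabled from proxy-server.conf
+# roughly as follows (the pipeline order shown is an assumption):
+#
+#     [pipeline:main]
+#     pipeline = healthcheck cache gluster proxy-server
+#
+#     [filter:gluster]
+#     use = egg:gluster_swift_ufo#gluster
+#
+# so this module is imported, and the constraints patched, before the proxy
+# application is constructed.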
diff --git a/ufo/gluster/swift/common/ring.py b/ufo/gluster/swift/common/ring.py
new file mode 100644
index 0000000..06aab8d
--- /dev/null
+++ b/ufo/gluster/swift/common/ring.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ConfigParser import ConfigParser
+from swift.common.ring import ring
+from swift.common.utils import search_tree
+from gluster.swift.common.Glusterfs import SWIFT_DIR
+
+reseller_prefix = "AUTH_"
+conf_files = search_tree(SWIFT_DIR, "proxy-server*", 'conf')
+if conf_files:
+ conf_file = conf_files[0]
+
+_conf = ConfigParser()
+if conf_files and _conf.read(conf_file):
+ if _conf.defaults().get("reseller_prefix", None):
+ reseller_prefix = _conf.defaults().get("reseller_prefix")
+ else:
+ for key, value in _conf._sections.items():
+ if value.get("reseller_prefix", None):
+ reseller_prefix = value["reseller_prefix"]
+ break
+
+if not reseller_prefix.endswith('_'):
+ reseller_prefix = reseller_prefix + '_'
+
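+# Example (editor's sketch): a proxy-server.conf carrying
+#
+#     [DEFAULT]
+#     reseller_prefix = TEST
+#
+# yields reseller_prefix == 'TEST_' at this point, since the trailing
+# underscore is appended above when missing.
+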
+class Ring(ring.Ring):
+ def _get_part_nodes(self, part):
+        seen_ids = set()
+        # Select every device belonging to this account's volume, skipping
+        # duplicate ids (set.add() returns None, so the 'or' clause records
+        # the id without changing the filter's outcome).
+        nodes = [dev for dev in self._devs
+                 if dev['device'] == self.acc_name
+                 and not (dev['id'] in seen_ids
+                          or seen_ids.add(dev['id']))]
+ if not nodes:
+ nodes = [self.false_node]
+ return nodes
+
+ def get_part_nodes(self, part):
+ """
+ Get the nodes that are responsible for the partition. If one
+ node is responsible for more than one replica of the same
+ partition, it will only appear in the output once.
+
+ :param part: partition to get nodes for
+ :returns: list of node dicts
+
+ See :func:`get_nodes` for a description of the node dicts.
+ """
+ return self._get_part_nodes(part)
+
+ def get_nodes(self, account, container=None, obj=None):
+ """
+ Get the partition and nodes for an account/container/object.
+ If a node is responsible for more than one replica, it will
+ only appear in the output once.
+ :param account: account name
+ :param container: container name
+ :param obj: object name
+ :returns: a tuple of (partition, list of node dicts)
+
+ Each node dict will have at least the following keys:
+ ====== ===============================================================
+ id unique integer identifier amongst devices
+ weight a float of the relative weight of this device as compared to
+ others; this indicates how many partitions the builder will try
+ to assign to this device
+ zone integer indicating which zone the device is in; a given
+ partition will not be assigned to multiple devices within the
+ same zone
+ ip the ip address of the device
+ port the tcp port of the device
+ device the device's name on disk (sdb1, for example)
+ meta general use 'extra' field; for example: the online date, the
+ hardware description
+ ====== ===============================================================
+ """
+        self.false_node = {'zone': 1, 'weight': 100.0, 'ip': '127.0.0.1',
+                           'id': 0, 'meta': '',
+                           'device': 'volume_not_in_ring', 'port': 6012}
+ if account.startswith(reseller_prefix):
+ self.acc_name = account.replace(reseller_prefix, '', 1)
+ else:
+ self.acc_name = account
+
+ part = 0
+ return part, self._get_part_nodes(part)
+
+
+ def get_more_nodes(self, part):
+ """
+ Generator to get extra nodes for a partition for hinted handoff.
+
+ :param part: partition to get handoff nodes for
+ :returns: generator of node dicts
+
+ See :func:`get_nodes` for a description of the node dicts.
+        Should never be called in the Swift UFO environment; we yield only
+        the placeholder false_node as a safety net.
+ """
+ yield self.false_node
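+
+# Example (editor's sketch): for a Gluster volume "vol0" present in the ring,
+# Ring(SWIFT_DIR, ring_name='object').get_nodes('AUTH_vol0') returns
+# (0, [<device dict whose 'device' field is 'vol0'>]); an account whose
+# volume is not in the ring gets the 'volume_not_in_ring' placeholder node.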
diff --git a/ufo/gluster/swift/common/utils.py b/ufo/gluster/swift/common/utils.py
new file mode 100644
index 0000000..f2cd8de
--- /dev/null
+++ b/ufo/gluster/swift/common/utils.py
@@ -0,0 +1,496 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import errno
+import xattr
+import random
+from hashlib import md5
+from eventlet import sleep
+import cPickle as pickle
+from ConfigParser import ConfigParser, NoSectionError, NoOptionError
+from swift.common.utils import normalize_timestamp, TRUE_VALUES
+from gluster.swift.common.fs_utils import *
+from gluster.swift.common import Glusterfs
+
+X_CONTENT_TYPE = 'Content-Type'
+X_CONTENT_LENGTH = 'Content-Length'
+X_TIMESTAMP = 'X-Timestamp'
+X_PUT_TIMESTAMP = 'X-PUT-Timestamp'
+X_TYPE = 'X-Type'
+X_ETAG = 'ETag'
+X_OBJECTS_COUNT = 'X-Object-Count'
+X_BYTES_USED = 'X-Bytes-Used'
+X_CONTAINER_COUNT = 'X-Container-Count'
+X_OBJECT_TYPE = 'X-Object-Type'
+DIR_TYPE = 'application/directory'
+ACCOUNT = 'Account'
+METADATA_KEY = 'user.swift.metadata'
+MAX_XATTR_SIZE = 65536
+CONTAINER = 'container'
+DIR = 'dir'
+MARKER_DIR = 'marker_dir'
+TEMP_DIR = 'tmp'
+ASYNCDIR = 'async_pending' # Keep in sync with swift.obj.server.ASYNCDIR
+FILE = 'file'
+FILE_TYPE = 'application/octet-stream'
+OBJECT = 'Object'
+OBJECT_TYPE = 'application/octet-stream'
+DEFAULT_UID = -1
+DEFAULT_GID = -1
+PICKLE_PROTOCOL = 2
+CHUNK_SIZE = 65536
+MEMCACHE_KEY_PREFIX = 'gluster.swift.'
+MEMCACHE_ACCOUNT_DETAILS_KEY_PREFIX = MEMCACHE_KEY_PREFIX + 'account.details.'
+MEMCACHE_CONTAINER_DETAILS_KEY_PREFIX = MEMCACHE_KEY_PREFIX + 'container.details.'
+
+def read_metadata(path):
+ """
+ Helper function to read the pickled metadata from a File/Directory.
+
+ :param path: File/Directory to read metadata from.
+
+ :returns: dictionary of metadata
+ """
+ metadata = None
+ metadata_s = ''
+ key = 0
+ while metadata is None:
+ try:
+ metadata_s += xattr.getxattr(path, '%s%s' % (METADATA_KEY, (key or '')))
+ except IOError as err:
+ if err.errno == errno.ENODATA:
+ if key > 0:
+ # No errors reading the xattr keys, but since we have not
+ # been able to find enough chunks to get a successful
+ # unpickle operation, we consider the metadata lost, and
+ # drop the existing data so that the internal state can be
+ # recreated.
+ clean_metadata(path)
+                # Either we could not find any metadata key, or we found
+                # some keys but were not able to unpickle the data (missing
+                # chunks, perhaps). Either way, report to the caller that we
+                # have no metadata.
+ metadata = {}
+ else:
+ logging.exception("xattr.getxattr failed on %s key %s err: %s",
+ path, key, str(err))
+ # Note that we don't touch the keys on errors fetching the
+ # data since it could be a transient state.
+ raise
+ else:
+ try:
+ # If this key provides all or the remaining part of the pickle
+ # data, we don't need to keep searching for more keys. This
+                # means that if the data is stored in N xattr key/value
+                # pairs, we only need to invoke xattr get N times. With
+                # large key sizes we are shooting for N = 1.
+ metadata = pickle.loads(metadata_s)
+ assert isinstance(metadata, dict)
+            except (EOFError, pickle.UnpicklingError):
+                # We are still not able to recognize the data collected so
+                # far as a pickled object. Loop around to try to fetch more
+                # from the next xattr key.
+ metadata = None
+ key += 1
+ return metadata
+
+def write_metadata(path, metadata):
+ """
+ Helper function to write pickled metadata for a File/Directory.
+
+ :param path: File/Directory path to write the metadata
+    :param metadata: dictionary of metadata to write
+ """
+ assert isinstance(metadata, dict)
+ metastr = pickle.dumps(metadata, PICKLE_PROTOCOL)
+ key = 0
+ while metastr:
+ try:
+ xattr.setxattr(path, '%s%s' % (METADATA_KEY, key or ''), metastr[:MAX_XATTR_SIZE])
+ except IOError as err:
+ logging.exception("setxattr failed on %s key %s err: %s", path, key, str(err))
+ raise
+ metastr = metastr[MAX_XATTR_SIZE:]
+ key += 1
+
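+# Example (editor's sketch) of the chunked layout produced above for a
+# pickled blob of 150000 bytes, given MAX_XATTR_SIZE = 65536:
+#
+#     user.swift.metadata   -> bytes [0, 65536)
+#     user.swift.metadata1  -> bytes [65536, 131072)
+#     user.swift.metadata2  -> bytes [131072, 150000)
+#
+# read_metadata() above reassembles the chunks in the same order until
+# pickle.loads() succeeds.
+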
+def clean_metadata(path):
+ key = 0
+ while True:
+ try:
+ xattr.removexattr(path, '%s%s' % (METADATA_KEY, (key or '')))
+ except IOError as err:
+ if err.errno == errno.ENODATA:
+ break
+ raise
+ key += 1
+
+def check_user_xattr(path):
+ if not os_path.exists(path):
+ return False
+ try:
+ xattr.setxattr(path, 'user.test.key1', 'value1')
+ except IOError as err:
+ logging.exception("check_user_xattr: set failed on %s err: %s", path, str(err))
+ raise
+ try:
+ xattr.removexattr(path, 'user.test.key1')
+ except IOError as err:
+ logging.exception("check_user_xattr: remove failed on %s err: %s", path, str(err))
+        # Removing the xattr may fail in the case of a concurrent remove.
+ return True
+
+def validate_container(metadata):
+ if not metadata:
+ logging.warn('validate_container: No metadata')
+ return False
+
+ if X_TYPE not in metadata.keys() or \
+ X_TIMESTAMP not in metadata.keys() or \
+ X_PUT_TIMESTAMP not in metadata.keys() or \
+ X_OBJECTS_COUNT not in metadata.keys() or \
+ X_BYTES_USED not in metadata.keys():
+ #logging.warn('validate_container: Metadata missing entries: %s' % metadata)
+ return False
+
+ (value, timestamp) = metadata[X_TYPE]
+ if value == CONTAINER:
+ return True
+
+ logging.warn('validate_container: metadata type is not CONTAINER (%r)' % (value,))
+ return False
+
+def validate_account(metadata):
+ if not metadata:
+ logging.warn('validate_account: No metadata')
+ return False
+
+ if X_TYPE not in metadata.keys() or \
+ X_TIMESTAMP not in metadata.keys() or \
+ X_PUT_TIMESTAMP not in metadata.keys() or \
+ X_OBJECTS_COUNT not in metadata.keys() or \
+ X_BYTES_USED not in metadata.keys() or \
+ X_CONTAINER_COUNT not in metadata.keys():
+ #logging.warn('validate_account: Metadata missing entries: %s' % metadata)
+ return False
+
+ (value, timestamp) = metadata[X_TYPE]
+ if value == ACCOUNT:
+ return True
+
+ logging.warn('validate_account: metadata type is not ACCOUNT (%r)' % (value,))
+ return False
+
+def validate_object(metadata):
+ if not metadata:
+ logging.warn('validate_object: No metadata')
+ return False
+
+ if X_TIMESTAMP not in metadata.keys() or \
+ X_CONTENT_TYPE not in metadata.keys() or \
+ X_ETAG not in metadata.keys() or \
+ X_CONTENT_LENGTH not in metadata.keys() or \
+ X_TYPE not in metadata.keys() or \
+ X_OBJECT_TYPE not in metadata.keys():
+ #logging.warn('validate_object: Metadata missing entries: %s' % metadata)
+ return False
+
+ if metadata[X_TYPE] == OBJECT:
+ return True
+
+ logging.warn('validate_object: metadata type is not OBJECT (%r)' % (metadata[X_TYPE],))
+ return False
+
+def is_marker(metadata):
+ if not metadata:
+ logging.warn('is_marker: No metadata')
+ return False
+
+ if X_OBJECT_TYPE not in metadata.keys():
+ logging.warn('is_marker: X_OBJECT_TYPE missing from metadata: %s' % metadata)
+ return False
+
+ if metadata[X_OBJECT_TYPE] == MARKER_DIR:
+ return True
+ else:
+ return False
+
+def _update_list(path, cont_path, src_list, reg_file=True, object_count=0,
+                 bytes_used=0, obj_list=None):
+    # Avoid a shared mutable default; callers normally pass obj_list in.
+    if obj_list is None:
+        obj_list = []
+ # strip the prefix off, also stripping the leading and trailing slashes
+ obj_path = path.replace(cont_path, '').strip(os.path.sep)
+
+ for obj_name in src_list:
+ if obj_path:
+ obj_list.append(os.path.join(obj_path, obj_name))
+ else:
+ obj_list.append(obj_name)
+
+ object_count += 1
+
+ if Glusterfs._do_getsize and reg_file:
+ bytes_used += os_path.getsize(os.path.join(path, obj_name))
+ sleep()
+
+ return object_count, bytes_used
+
+def update_list(path, cont_path, dirs=[], files=[], object_count=0,
+                bytes_used=0, obj_list=None):
+    if obj_list is None:
+        obj_list = []
+ if files:
+ object_count, bytes_used = _update_list(path, cont_path, files, True,
+ object_count, bytes_used,
+ obj_list)
+ if dirs:
+ object_count, bytes_used = _update_list(path, cont_path, dirs, False,
+ object_count, bytes_used,
+ obj_list)
+ return object_count, bytes_used
+
+
+class ContainerDetails(object):
+ def __init__(self, bytes_used, object_count, obj_list, dir_list):
+ self.bytes_used = bytes_used
+ self.object_count = object_count
+ self.obj_list = obj_list
+ self.dir_list = dir_list
+
+
+def _get_container_details_from_fs(cont_path):
+ """
+ get container details by traversing the filesystem
+ """
+ bytes_used = 0
+ object_count = 0
+ obj_list = []
+ dir_list = []
+
+ if os_path.isdir(cont_path):
+ for (path, dirs, files) in do_walk(cont_path):
+ object_count, bytes_used = update_list(path, cont_path, dirs, files,
+ object_count, bytes_used,
+ obj_list)
+
+ dir_list.append((path, do_stat(path).st_mtime))
+ sleep()
+
+ return ContainerDetails(bytes_used, object_count, obj_list, dir_list)
+
+def get_container_details(cont_path, memcache=None):
+ """
+ Return object_list, object_count and bytes_used.
+ """
+ mkey = ''
+ if memcache:
+ mkey = MEMCACHE_CONTAINER_DETAILS_KEY_PREFIX + cont_path
+ cd = memcache.get(mkey)
+ if cd:
+ if not cd.dir_list:
+ cd = None
+ else:
+ for (path, mtime) in cd.dir_list:
+ if mtime != do_stat(path).st_mtime:
+ cd = None
+ else:
+ cd = None
+ if not cd:
+ cd = _get_container_details_from_fs(cont_path)
+ if memcache:
+ memcache.set(mkey, cd)
+ return cd.obj_list, cd.object_count, cd.bytes_used
+
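+# Note (editor's comment): the cached ContainerDetails is trusted only if
+# every directory recorded in cd.dir_list still has its recorded st_mtime;
+# a single changed directory forces a full re-walk of the container tree.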
+
+class AccountDetails(object):
+ """ A simple class to store the three pieces of information associated
+ with an account:
+
+ 1. The last known modification time
+ 2. The count of containers in the following list
+ 3. The list of containers
+ """
+ def __init__(self, mtime, container_count, container_list):
+ self.mtime = mtime
+ self.container_count = container_count
+ self.container_list = container_list
+
+
+def _get_account_details_from_fs(acc_path, acc_stats):
+ container_list = []
+ container_count = 0
+
+ if not acc_stats:
+ acc_stats = do_stat(acc_path)
+    is_dir = (acc_stats.st_mode & 0040000) != 0  # 0040000 == stat.S_IFDIR
+ if is_dir:
+ for name in do_listdir(acc_path):
+ if name.lower() == TEMP_DIR \
+ or name.lower() == ASYNCDIR \
+ or not os_path.isdir(os.path.join(acc_path, name)):
+ continue
+ container_count += 1
+ container_list.append(name)
+
+ return AccountDetails(acc_stats.st_mtime, container_count, container_list)
+
+def get_account_details(acc_path, memcache=None):
+ """
+ Return container_list and container_count.
+ """
+ acc_stats = None
+ mkey = ''
+ if memcache:
+ mkey = MEMCACHE_ACCOUNT_DETAILS_KEY_PREFIX + acc_path
+ ad = memcache.get(mkey)
+ if ad:
+ # FIXME: Do we really need to stat the file? If we are object
+ # only, then we can track the other Swift HTTP APIs that would
+ # modify the account and invalidate the cached entry there. If we
+ # are not object only, are we even called on this path?
+ acc_stats = do_stat(acc_path)
+ if ad.mtime != acc_stats.st_mtime:
+ ad = None
+ else:
+ ad = None
+ if not ad:
+ ad = _get_account_details_from_fs(acc_path, acc_stats)
+ if memcache:
+ memcache.set(mkey, ad)
+ return ad.container_list, ad.container_count
+
+def _get_etag(path):
+ etag = md5()
+ with open(path, 'rb') as fp:
+ while True:
+ chunk = fp.read(CHUNK_SIZE)
+ if chunk:
+ etag.update(chunk)
+ else:
+ break
+ return etag.hexdigest()
+
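+# Example (editor's sketch): for a file containing exactly "1234",
+# _get_etag() returns md5("1234").hexdigest(), that is
+# '81dc9bdb52d04dc20036dbd8313ed055', matching the ETag served to clients.
+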
+def get_object_metadata(obj_path):
+ """
+ Return metadata of object.
+ """
+ try:
+ stats = do_stat(obj_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ metadata = {}
+ else:
+        is_dir = (stats.st_mode & 0040000) != 0  # 0040000 == stat.S_IFDIR
+ metadata = {
+ X_TYPE: OBJECT,
+ X_TIMESTAMP: normalize_timestamp(stats.st_ctime),
+ X_CONTENT_TYPE: DIR_TYPE if is_dir else FILE_TYPE,
+ X_OBJECT_TYPE: DIR if is_dir else FILE,
+ X_CONTENT_LENGTH: 0 if is_dir else stats.st_size,
+ X_ETAG: md5().hexdigest() if is_dir else _get_etag(obj_path),
+ }
+ return metadata
+
+def _add_timestamp(metadata_i):
+ # At this point we have a simple key/value dictionary, turn it into
+ # key/(value,timestamp) pairs.
+ timestamp = 0
+ metadata = {}
+ for key, value_i in metadata_i.iteritems():
+ if not isinstance(value_i, tuple):
+ metadata[key] = (value_i, timestamp)
+ else:
+ metadata[key] = value_i
+ return metadata
+
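+# Example (editor's sketch):
+#
+#     >>> _add_timestamp({'X-Type': 'Account', 'X-Bytes-Used': 0})
+#     {'X-Type': ('Account', 0), 'X-Bytes-Used': (0, 0)}
+#
+# Values that are already (value, timestamp) tuples pass through unchanged.
+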
+def get_container_metadata(cont_path, memcache=None):
+    objects, object_count, bytes_used = get_container_details(cont_path, memcache)
+ metadata = {X_TYPE: CONTAINER,
+ X_TIMESTAMP: normalize_timestamp(os_path.getctime(cont_path)),
+ X_PUT_TIMESTAMP: normalize_timestamp(os_path.getmtime(cont_path)),
+ X_OBJECTS_COUNT: object_count,
+ X_BYTES_USED: bytes_used}
+ return _add_timestamp(metadata)
+
+def get_account_metadata(acc_path, memcache=None):
+    containers, container_count = get_account_details(acc_path, memcache)
+ metadata = {X_TYPE: ACCOUNT,
+ X_TIMESTAMP: normalize_timestamp(os_path.getctime(acc_path)),
+ X_PUT_TIMESTAMP: normalize_timestamp(os_path.getmtime(acc_path)),
+ X_OBJECTS_COUNT: 0,
+ X_BYTES_USED: 0,
+ X_CONTAINER_COUNT: container_count}
+ return _add_timestamp(metadata)
+
+def restore_metadata(path, metadata):
+ meta_orig = read_metadata(path)
+ if meta_orig:
+ meta_new = meta_orig.copy()
+ meta_new.update(metadata)
+ else:
+ meta_new = metadata
+ if meta_orig != meta_new:
+ write_metadata(path, meta_new)
+ return meta_new
+
+def create_object_metadata(obj_path):
+ metadata = get_object_metadata(obj_path)
+ return restore_metadata(obj_path, metadata)
+
+def create_container_metadata(cont_path, memcache=None):
+ metadata = get_container_metadata(cont_path, memcache)
+ return restore_metadata(cont_path, metadata)
+
+def create_account_metadata(acc_path, memcache=None):
+ metadata = get_account_metadata(acc_path, memcache)
+ return restore_metadata(acc_path, metadata)
+
+def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
+ """
+    Ensure that a pickle file gets written to disk. The file is first written
+    to a tmp file location in the destination directory path, synced to disk,
+    and then moved to its final destination name.
+
+    This version takes advantage of Gluster's dot-prefix-dot-suffix naming,
+    where a file named ".thefile.name.9a7aasv" is hashed to the same
+    Gluster node as "thefile.name". This ensures that renaming the temp file
+    once written does not move it to another Gluster node.
+
+ :param obj: python object to be pickled
+ :param dest: path of final destination file
+ :param tmp: path to tmp to use, defaults to None (ignored)
+ :param pickle_protocol: protocol to pickle the obj with, defaults to 0
+ """
+ dirname = os.path.dirname(dest)
+ basename = os.path.basename(dest)
+ tmpname = '.' + basename + '.' + md5(basename + str(random.random())).hexdigest()
+ tmppath = os.path.join(dirname, tmpname)
+ with open(tmppath, 'wb') as fo:
+ pickle.dump(obj, fo, pickle_protocol)
+        # TODO: This flush() method call turns into a flush() system call.
+        # We'll need to wrap this as well, by writing a context manager for
+        # our own open() method that returns an object in fo which makes the
+        # gluster API call.
+ fo.flush()
+ do_fsync(fo)
+ do_rename(tmppath, dest)
+
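+# Example (editor's sketch): for dest '/mnt/gluster-object/vol0/async_pending/abc',
+# the temp file is '.abc.<md5hex>' in the same directory; Gluster's
+# dot-prefix-dot-suffix hashing places it on the same node as 'abc', so the
+# final do_rename() never crosses nodes.
+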
+# Over-ride Swift's utils.write_pickle with ours
+import swift.common.utils
+swift.common.utils.write_pickle = write_pickle
diff --git a/ufo/gluster/swift/container/__init__.py b/ufo/gluster/swift/container/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ufo/gluster/swift/container/__init__.py
diff --git a/ufo/gluster/swift/container/server.py b/ufo/gluster/swift/container/server.py
new file mode 100644
index 0000000..c5792aa
--- /dev/null
+++ b/ufo/gluster/swift/container/server.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Container Server for Gluster Swift UFO """
+
+# Simply importing this monkey patches the constraint handling to fit our
+# needs
+import gluster.swift.common.constraints
+
+from swift.container import server
+from gluster.swift.common.DiskDir import DiskDir
+
+
+class ContainerController(server.ContainerController):
+ def _get_container_broker(self, drive, part, account, container):
+ """
+        Overridden to provide the GlusterFS-specific broker that talks to
+ Gluster for the information related to servicing a given request
+ instead of talking to a database.
+
+ :param drive: drive that holds the container
+ :param part: partition the container is in
+ :param account: account name
+ :param container: container name
+ :returns: DiskDir object
+ """
+ return DiskDir(self.root, drive, account, container, self.logger)
+
+
+def app_factory(global_conf, **local_conf):
+ """paste.deploy app factory for creating WSGI container server apps."""
+ conf = global_conf.copy()
+ conf.update(local_conf)
+ return ContainerController(conf)
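+
+# Example (editor's sketch): wired in from the container server config,
+# e.g. /etc/swift/container-server/1.conf:
+#
+#     [app:container-server]
+#     use = egg:gluster_swift_ufo#container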
diff --git a/ufo/gluster/swift/obj/__init__.py b/ufo/gluster/swift/obj/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ufo/gluster/swift/obj/__init__.py
diff --git a/ufo/gluster/swift/obj/server.py b/ufo/gluster/swift/obj/server.py
new file mode 100644
index 0000000..1c2b6cb
--- /dev/null
+++ b/ufo/gluster/swift/obj/server.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Object Server for Gluster Swift UFO """
+
+# Simply importing this monkey patches the constraint handling to fit our
+# needs
+import gluster.swift.common.constraints
+import gluster.swift.common.utils
+
+from swift.obj import server
+from gluster.swift.common.DiskFile import Gluster_DiskFile
+
+# Monkey patch the object server module to use Gluster's DiskFile definition
+server.DiskFile = Gluster_DiskFile
+
+
+def app_factory(global_conf, **local_conf):
+ """paste.deploy app factory for creating WSGI object server apps"""
+ conf = global_conf.copy()
+ conf.update(local_conf)
+ return server.ObjectController(conf)
diff --git a/ufo/gluster/swift/proxy/__init__.py b/ufo/gluster/swift/proxy/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ufo/gluster/swift/proxy/__init__.py
diff --git a/ufo/gluster/swift/proxy/server.py b/ufo/gluster/swift/proxy/server.py
new file mode 100644
index 0000000..792a97d
--- /dev/null
+++ b/ufo/gluster/swift/proxy/server.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Simply importing this monkey patches the constraint handling to fit our
+# needs
+import gluster.swift.common.constraints
+
+from swift.proxy import server
+
+def app_factory(global_conf, **local_conf):
+ """paste.deploy app factory for creating WSGI proxy apps."""
+ conf = global_conf.copy()
+ conf.update(local_conf)
+ return server.Application(conf)
diff --git a/ufo/setup.py b/ufo/setup.py
new file mode 100644
index 0000000..a483102
--- /dev/null
+++ b/ufo/setup.py
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from setuptools import setup, find_packages
+
+from gluster.swift import __canonical_version__ as version
+
+
+name = 'gluster_swift_ufo'
+
+
+setup(
+ name=name,
+ version=version,
+ description='Gluster Swift/UFO',
+ license='Apache License (2.0)',
+ author='Red Hat, Inc.',
+ author_email='gluster-users@gluster.org',
+ url='https://gluster.org/',
+ packages=find_packages(exclude=['test', 'bin']),
+ test_suite='nose.collector',
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: POSIX :: Linux',
+ 'Programming Language :: Python :: 2.6',
+ 'Environment :: No Input/Output (Daemon)',
+ ],
+ install_requires=[], # removed for better compat
+ scripts=[
+ 'bin/gluster-swift-gen-builders',
+ ],
+ entry_points={
+ 'paste.app_factory': [
+ 'proxy=gluster.swift.proxy.server:app_factory',
+ 'object=gluster.swift.obj.server:app_factory',
+ 'container=gluster.swift.container.server:app_factory',
+ 'account=gluster.swift.account.server:app_factory',
+ ],
+ 'paste.filter_factory': [
+ 'gluster=gluster.swift.common.middleware.gluster:filter_factory',
+ ],
+ },
+ )
diff --git a/ufo/test/__init__.py b/ufo/test/__init__.py
new file mode 100644
index 0000000..50b24ed
--- /dev/null
+++ b/ufo/test/__init__.py
@@ -0,0 +1,49 @@
+# See http://code.google.com/p/python-nose/issues/detail?id=373
+# The code below enables nosetests to work with i18n _() blocks
+
+import __builtin__
+import sys
+import os
+
+from swift.common.utils import readconf
+
+setattr(__builtin__, '_', lambda x: x)
+
+
+# Work around what seems to be a Python bug.
+# c.f. https://bugs.launchpad.net/swift/+bug/820185.
+import logging
+logging.raiseExceptions = False
+
+
+def get_config(section_name=None, defaults=None):
+ """
+ Attempt to get a test config dictionary.
+
+ :param section_name: the section to read (all sections if not defined)
+    :param defaults: an optional dictionary of default values
+ """
+ config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
+ '/etc/swift/test.conf')
+ config = {}
+ if defaults is not None:
+ config.update(defaults)
+
+ try:
+ config = readconf(config_file, section_name)
+ except SystemExit:
+ if not os.path.exists(config_file):
+ print >>sys.stderr, \
+ 'Unable to read test config %s - file not found' \
+ % config_file
+ elif not os.access(config_file, os.R_OK):
+ print >>sys.stderr, \
+ 'Unable to read test config %s - permission denied' \
+ % config_file
+ else:
+ print >>sys.stderr, \
+ 'Unable to read test config %s - section %s not found' \
+ % (config_file, section_name)
+ return config
diff --git a/ufo/test/unit/__init__.py b/ufo/test/unit/__init__.py
new file mode 100644
index 0000000..cb24764
--- /dev/null
+++ b/ufo/test/unit/__init__.py
@@ -0,0 +1,95 @@
+""" Gluster Swift Unit Tests """
+
+import copy
+import logging
+import logging.handlers
+from collections import defaultdict
+from sys import exc_info
+from test import get_config
+from swift.common.utils import TRUE_VALUES
+
+
+class NullLoggingHandler(logging.Handler):
+
+ def emit(self, record):
+ pass
+
+
+class FakeLogger(object):
+ # a thread safe logger
+
+ def __init__(self, *args, **kwargs):
+ self._clear()
+ self.level = logging.NOTSET
+ if 'facility' in kwargs:
+ self.facility = kwargs['facility']
+
+ def _clear(self):
+ self.log_dict = defaultdict(list)
+
+ def _store_in(store_name):
+ def stub_fn(self, *args, **kwargs):
+ self.log_dict[store_name].append((args, kwargs))
+ return stub_fn
+
+ error = _store_in('error')
+ info = _store_in('info')
+ warning = _store_in('warning')
+ debug = _store_in('debug')
+
+ def exception(self, *args, **kwargs):
+ self.log_dict['exception'].append((args, kwargs, str(exc_info()[1])))
+
+ # mock out the StatsD logging methods:
+ increment = _store_in('increment')
+ decrement = _store_in('decrement')
+ timing = _store_in('timing')
+ timing_since = _store_in('timing_since')
+ update_stats = _store_in('update_stats')
+ set_statsd_prefix = _store_in('set_statsd_prefix')
+
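+    # Example (editor's sketch): after fl.error('failed', 42), the call is
+    # recorded as fl.log_dict['error'] == [(('failed', 42), {})].
+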
+ def setFormatter(self, obj):
+ self.formatter = obj
+
+ def close(self):
+ self._clear()
+
+ def set_name(self, name):
+ # don't touch _handlers
+ self._name = name
+
+ def acquire(self):
+ pass
+
+ def release(self):
+ pass
+
+ def createLock(self):
+ pass
+
+ def emit(self, record):
+ pass
+
+ def handle(self, record):
+ pass
+
+ def flush(self):
+ pass
+
+ def handleError(self, record):
+ pass
+
+
+original_syslog_handler = logging.handlers.SysLogHandler
+
+
+def fake_syslog_handler():
+ for attr in dir(original_syslog_handler):
+ if attr.startswith('LOG'):
+ setattr(FakeLogger, attr,
+ copy.copy(getattr(logging.handlers.SysLogHandler, attr)))
+ FakeLogger.priority_map = \
+ copy.deepcopy(logging.handlers.SysLogHandler.priority_map)
+
+ logging.handlers.SysLogHandler = FakeLogger
+
+
+if get_config('unit_test').get('fake_syslog', 'False').lower() in TRUE_VALUES:
+ fake_syslog_handler()
diff --git a/ufo/test/unit/common/__init__.py b/ufo/test/unit/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ufo/test/unit/common/__init__.py
diff --git a/ufo/test/unit/common/data/README.rings b/ufo/test/unit/common/data/README.rings
new file mode 100644
index 0000000..6457501
--- /dev/null
+++ b/ufo/test/unit/common/data/README.rings
@@ -0,0 +1,3 @@
+The unit tests expect certain ring data built using the following command:
+
+ ../../../../bin/gluster-swift-gen-builders test iops \ No newline at end of file
diff --git a/ufo/test/unit/common/data/account.builder b/ufo/test/unit/common/data/account.builder
new file mode 100644
index 0000000..090ba4b
--- /dev/null
+++ b/ufo/test/unit/common/data/account.builder
Binary files differ
diff --git a/ufo/test/unit/common/data/account.ring.gz b/ufo/test/unit/common/data/account.ring.gz
new file mode 100644
index 0000000..6d4c854
--- /dev/null
+++ b/ufo/test/unit/common/data/account.ring.gz
Binary files differ
diff --git a/ufo/test/unit/common/data/account_tree.tar.bz2 b/ufo/test/unit/common/data/account_tree.tar.bz2
new file mode 100644
index 0000000..cb23e4d
--- /dev/null
+++ b/ufo/test/unit/common/data/account_tree.tar.bz2
Binary files differ
diff --git a/ufo/test/unit/common/data/backups/1365124498.account.builder b/ufo/test/unit/common/data/backups/1365124498.account.builder
new file mode 100644
index 0000000..090ba4b
--- /dev/null
+++ b/ufo/test/unit/common/data/backups/1365124498.account.builder
Binary files differ
diff --git a/ufo/test/unit/common/data/backups/1365124498.container.builder b/ufo/test/unit/common/data/backups/1365124498.container.builder
new file mode 100644
index 0000000..733d27d
--- /dev/null
+++ b/ufo/test/unit/common/data/backups/1365124498.container.builder
Binary files differ
diff --git a/ufo/test/unit/common/data/backups/1365124498.object.builder b/ufo/test/unit/common/data/backups/1365124498.object.builder
new file mode 100644
index 0000000..ff877ec
--- /dev/null
+++ b/ufo/test/unit/common/data/backups/1365124498.object.builder
Binary files differ
diff --git a/ufo/test/unit/common/data/backups/1365124499.object.builder b/ufo/test/unit/common/data/backups/1365124499.object.builder
new file mode 100644
index 0000000..8b8cd6c
--- /dev/null
+++ b/ufo/test/unit/common/data/backups/1365124499.object.builder
Binary files differ
diff --git a/ufo/test/unit/common/data/container.builder b/ufo/test/unit/common/data/container.builder
new file mode 100644
index 0000000..733d27d
--- /dev/null
+++ b/ufo/test/unit/common/data/container.builder
Binary files differ
diff --git a/ufo/test/unit/common/data/container.ring.gz b/ufo/test/unit/common/data/container.ring.gz
new file mode 100644
index 0000000..592b84b
--- /dev/null
+++ b/ufo/test/unit/common/data/container.ring.gz
Binary files differ
diff --git a/ufo/test/unit/common/data/container_tree.tar.bz2 b/ufo/test/unit/common/data/container_tree.tar.bz2
new file mode 100644
index 0000000..b4a1492
--- /dev/null
+++ b/ufo/test/unit/common/data/container_tree.tar.bz2
Binary files differ
diff --git a/ufo/test/unit/common/data/object.builder b/ufo/test/unit/common/data/object.builder
new file mode 100644
index 0000000..8b8cd6c
--- /dev/null
+++ b/ufo/test/unit/common/data/object.builder
Binary files differ
diff --git a/ufo/test/unit/common/data/object.ring.gz b/ufo/test/unit/common/data/object.ring.gz
new file mode 100644
index 0000000..d2f7192
--- /dev/null
+++ b/ufo/test/unit/common/data/object.ring.gz
Binary files differ
diff --git a/ufo/test/unit/common/test_Glusterfs.py b/ufo/test/unit/common/test_Glusterfs.py
new file mode 100644
index 0000000..7de060a
--- /dev/null
+++ b/ufo/test/unit/common/test_Glusterfs.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import os, fcntl, errno, shutil
+from tempfile import mkdtemp
+import gluster.swift.common.Glusterfs as gfs
+
+def mock_os_path_ismount(path):
+ return True
+
+def mock_get_export_list():
+ return ['test', 'test2']
+
+def mock_os_system(cmd):
+ return False
+
+def mock_fcntl_lockf(f, *a, **kw):
+    # Use the two-argument form so the exception's errno attribute is set.
+    raise IOError(errno.EAGAIN, os.strerror(errno.EAGAIN))
+
+def _init():
+ global _RUN_DIR, _OS_SYSTEM, _FCNTL_LOCKF
+ global _OS_PATH_ISMOUNT, __GET_EXPORT_LIST
+
+ _RUN_DIR = gfs.RUN_DIR
+ _OS_SYSTEM = os.system
+ _FCNTL_LOCKF = fcntl.lockf
+ _OS_PATH_ISMOUNT = os.path.ismount
+ __GET_EXPORT_LIST = gfs._get_export_list
+
+def _init_mock_variables(tmpdir):
+ os.system = mock_os_system
+ os.path.ismount = mock_os_path_ismount
+ gfs.RUN_DIR = os.path.join(tmpdir, 'var/run/swift')
+ gfs._get_export_list = mock_get_export_list
+
+def _reset_mock_variables():
+ gfs.RUN_DIR = _RUN_DIR
+ gfs._get_export_list = __GET_EXPORT_LIST
+
+ os.system = _OS_SYSTEM
+ fcntl.lockf = _FCNTL_LOCKF
+ os.path.ismount = _OS_PATH_ISMOUNT
+
+class TestGlusterfs(unittest.TestCase):
+ """ Tests for common.GlusterFS """
+
+ def setUp(self):
+ _init()
+
+ def test_mount(self):
+ try:
+ tmpdir = mkdtemp()
+ root = os.path.join(tmpdir, 'mnt/gluster-object')
+ drive = 'test'
+
+ _init_mock_variables(tmpdir)
+ assert gfs.mount(root, drive)
+ finally:
+ _reset_mock_variables()
+ shutil.rmtree(tmpdir)
+
+ def test_mount_egain(self):
+ try:
+ tmpdir = mkdtemp()
+ root = os.path.join(tmpdir, 'mnt/gluster-object')
+ drive = 'test'
+
+ _init_mock_variables(tmpdir)
+ assert gfs.mount(root, drive)
+ fcntl.lockf = mock_fcntl_lockf
+ assert gfs.mount(root, drive)
+ finally:
+ _reset_mock_variables()
+ shutil.rmtree(tmpdir)
+
+ def test_mount_get_export_list_err(self):
+ gfs._get_export_list = mock_get_export_list
+ assert not gfs.mount(None, 'drive')
+ _reset_mock_variables()
+
+ def tearDown(self):
+ _reset_mock_variables()
diff --git a/ufo/test/unit/common/test_diskfile.py b/ufo/test/unit/common/test_diskfile.py
new file mode 100644
index 0000000..85d539a
--- /dev/null
+++ b/ufo/test/unit/common/test_diskfile.py
@@ -0,0 +1,932 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Tests for gluster.swift.common.DiskFile """
+
+import os
+import stat
+import errno
+import unittest
+import tempfile
+import shutil
+from hashlib import md5
+from swift.common.utils import normalize_timestamp
+from swift.common.exceptions import DiskFileNotExist
+import gluster.swift.common.DiskFile
+import gluster.swift.common.utils
+from gluster.swift.common.DiskFile import Gluster_DiskFile, \
+ AlreadyExistsAsDir
+from gluster.swift.common.utils import DEFAULT_UID, DEFAULT_GID, X_TYPE, \
+ X_OBJECT_TYPE
+from test_utils import _initxattr, _destroyxattr
+from test.unit import FakeLogger
+
+
+_metadata = {}
+
+def _mock_read_metadata(filename):
+ if filename in _metadata:
+ md = _metadata[filename]
+ else:
+ md = {}
+ return md
+
+def _mock_write_metadata(filename, metadata):
+ _metadata[filename] = metadata
+
+def _mock_clear_metadata():
+    # Clear in place; a plain assignment would only create a local name.
+    _metadata.clear()
+
+
+class MockException(Exception):
+ pass
+
+
+def _mock_rmdirs(p):
+ raise MockException("gluster.swift.common.DiskFile.rmdirs() called")
+
+def _mock_do_listdir(p):
+ raise MockException("gluster.swift.common.DiskFile.do_listdir() called")
+
+def _mock_do_unlink(f):
+ ose = OSError()
+ ose.errno = errno.ENOENT
+ raise ose
+
+
+def _mock_do_unlink_eacces_err(f):
+ ose = OSError()
+ ose.errno = errno.EACCES
+ raise ose
+
+def _mock_getsize_eaccess_err(f):
+ ose = OSError()
+ ose.errno = errno.EACCES
+ raise ose
+
+def _mock_do_rmdir_eacces_err(f):
+ ose = OSError()
+ ose.errno = errno.EACCES
+ raise ose
+
+class MockRenamerCalled(Exception):
+ pass
+
+
+def _mock_renamer(a, b):
+ raise MockRenamerCalled()
+
+
+class TestDiskFile(unittest.TestCase):
+ """ Tests for gluster.swift.common.DiskFile """
+
+ def setUp(self):
+ self.lg = FakeLogger()
+ _initxattr()
+ _mock_clear_metadata()
+ self._saved_df_wm = gluster.swift.common.DiskFile.write_metadata
+ self._saved_df_rm = gluster.swift.common.DiskFile.read_metadata
+ gluster.swift.common.DiskFile.write_metadata = _mock_write_metadata
+ gluster.swift.common.DiskFile.read_metadata = _mock_read_metadata
+ self._saved_ut_wm = gluster.swift.common.utils.write_metadata
+ self._saved_ut_rm = gluster.swift.common.utils.read_metadata
+ gluster.swift.common.utils.write_metadata = _mock_write_metadata
+ gluster.swift.common.utils.read_metadata = _mock_read_metadata
+
+ def tearDown(self):
+ self.lg = None
+ _destroyxattr()
+ gluster.swift.common.DiskFile.write_metadata = self._saved_df_wm
+ gluster.swift.common.DiskFile.read_metadata = self._saved_df_rm
+ gluster.swift.common.utils.write_metadata = self._saved_ut_wm
+ gluster.swift.common.utils.read_metadata = self._saved_ut_rm
+
+ def test_constructor_no_slash(self):
+ assert not os.path.exists("/tmp/foo")
+ gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf._obj_path == ""
+ assert gdf.name == "bar"
+ assert gdf.datadir == "/tmp/foo/vol0/bar"
+ assert gdf.device_path == "/tmp/foo/vol0"
+ assert gdf._container_path == "/tmp/foo/vol0/bar"
+ assert gdf.disk_chunk_size == 65536
+ assert gdf.iter_hook == None
+ assert gdf.logger == self.lg
+ assert gdf.uid == DEFAULT_UID
+ assert gdf.gid == DEFAULT_GID
+ assert gdf.metadata == {}
+ assert gdf.meta_file == None
+ assert gdf.data_file == None
+ assert gdf.fp == None
+ assert gdf.iter_etag == None
+ assert not gdf.started_at_0
+ assert not gdf.read_to_eof
+ assert gdf.quarantined_dir == None
+ assert not gdf.keep_cache
+ assert not gdf._is_dir
+
+ def test_constructor_leadtrail_slash(self):
+ assert not os.path.exists("/tmp/foo")
+ gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
+ "/b/a/z/", self.lg)
+ assert gdf._obj == "z"
+ assert gdf._obj_path == "b/a"
+ assert gdf.name == "bar/b/a"
+ assert gdf.datadir == "/tmp/foo/vol0/bar/b/a"
+ assert gdf.device_path == "/tmp/foo/vol0"
+
+ def test_constructor_no_metadata(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ stats = os.stat(the_file)
+ ts = normalize_timestamp(stats.st_ctime)
+ etag = md5()
+ etag.update("1234")
+ etag = etag.hexdigest()
+ exp_md = {
+ 'Content-Length': 4,
+ 'ETag': etag,
+ 'X-Timestamp': ts,
+ 'Content-Type': 'application/octet-stream'}
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf.data_file == the_file
+ assert not gdf._is_dir
+ assert gdf.fp is None
+ assert gdf.metadata == exp_md
+ finally:
+ shutil.rmtree(td)
+
+ def test_constructor_existing_metadata(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ ini_md = {
+ 'X-Type': 'Object',
+ 'X-Object-Type': 'file',
+ 'Content-Length': 5,
+ 'ETag': 'etag',
+ 'X-Timestamp': 'ts',
+ 'Content-Type': 'application/loctet-stream'}
+ _metadata[the_file] = ini_md
+ exp_md = ini_md.copy()
+ del exp_md['X-Type']
+ del exp_md['X-Object-Type']
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf.data_file == the_file
+ assert not gdf._is_dir
+ assert gdf.fp is None
+ assert gdf.metadata == exp_md
+ finally:
+ shutil.rmtree(td)
+
+ def test_constructor_invalid_existing_metadata(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ inv_md = {
+ 'Content-Length': 5,
+ 'ETag': 'etag',
+ 'X-Timestamp': 'ts',
+ 'Content-Type': 'application/loctet-stream'}
+ _metadata[the_file] = inv_md
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf.data_file == the_file
+ assert not gdf._is_dir
+ assert gdf.fp is None
+ assert gdf.metadata != inv_md
+ finally:
+ shutil.rmtree(td)
+
+ def test_constructor_isdir(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "d")
+ try:
+ os.makedirs(the_dir)
+ ini_md = {
+ 'X-Type': 'Object',
+ 'X-Object-Type': 'dir',
+ 'Content-Length': 5,
+ 'ETag': 'etag',
+ 'X-Timestamp': 'ts',
+ 'Content-Type': 'application/loctet-stream'}
+ _metadata[the_dir] = ini_md
+ exp_md = ini_md.copy()
+ del exp_md['X-Type']
+ del exp_md['X-Object-Type']
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "d", self.lg, keep_data_fp=True)
+ assert gdf._obj == "d"
+ assert gdf.data_file == the_dir
+ assert gdf._is_dir
+ assert gdf.fp is None
+ assert gdf.metadata == exp_md
+ finally:
+ shutil.rmtree(td)
+
+ def test_constructor_keep_data_fp(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg, keep_data_fp=True)
+ assert gdf._obj == "z"
+ assert gdf.data_file == the_file
+ assert not gdf._is_dir
+ assert gdf.fp is not None
+ finally:
+ shutil.rmtree(td)
+
+ def test_constructor_chunk_size(self):
+ assert not os.path.exists("/tmp/foo")
+ gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
+ "z", self.lg, disk_chunk_size=8192)
+ assert gdf.disk_chunk_size == 8192
+
+ def test_constructor_iter_hook(self):
+ assert not os.path.exists("/tmp/foo")
+ gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
+ "z", self.lg, iter_hook='hook')
+ assert gdf.iter_hook == 'hook'
+
+ def test_close(self):
+ assert not os.path.exists("/tmp/foo")
+ gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+        # Should be a no-op: is_dir defaults to False and fp is None
+ gdf.close()
+
+ gdf._is_dir = True
+ gdf.fp = "123"
+ # Should still be a no-op as is_dir is True (marker directory)
+ gdf.close()
+ assert gdf.fp == "123"
+
+ gdf._is_dir = False
+ saved_dc = gluster.swift.common.DiskFile.do_close
+ self.called = False
+ def our_do_close(fp):
+ self.called = True
+ gluster.swift.common.DiskFile.do_close = our_do_close
+ try:
+ gdf.close()
+ assert self.called
+ assert gdf.fp is None
+ finally:
+ gluster.swift.common.DiskFile.do_close = saved_dc
+
+ def test_is_deleted(self):
+ assert not os.path.exists("/tmp/foo")
+ gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf.is_deleted()
+ gdf.data_file = "/tmp/foo/bar"
+ assert not gdf.is_deleted()
+
+ def test_create_dir_object(self):
+ td = tempfile.mkdtemp()
+ the_dir = os.path.join(td, "vol0", "bar", "dir")
+ try:
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "dir/z", self.lg)
+            # The dir object is not created yet; just checking the parsed name
+ assert gdf._obj == "z"
+ gdf._create_dir_object(the_dir)
+ assert os.path.isdir(the_dir)
+ assert the_dir in _metadata
+ finally:
+ shutil.rmtree(td)
+
+ def test_create_dir_object_exists(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "dir")
+ try:
+ os.makedirs(the_path)
+ with open(the_dir, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "dir/z", self.lg)
+            # The dir object is not created yet; just checking the parsed name
+ assert gdf._obj == "z"
+ def _mock_do_chown(p, u, g):
+ assert u == DEFAULT_UID
+ assert g == DEFAULT_GID
+ dc = gluster.swift.common.DiskFile.do_chown
+ gluster.swift.common.DiskFile.do_chown = _mock_do_chown
+ try:
+ gdf._create_dir_object(the_dir)
+ finally:
+ gluster.swift.common.DiskFile.do_chown = dc
+ assert os.path.isdir(the_dir)
+ assert the_dir in _metadata
+ finally:
+ shutil.rmtree(td)
+
+ def test_put_metadata(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_dir)
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ md = { 'Content-Type': 'application/octet-stream', 'a': 'b' }
+ gdf.put_metadata(md.copy())
+ assert gdf.metadata == md, "gdf.metadata = %r, md = %r" % (gdf.metadata, md)
+ assert _metadata[the_dir] == md
+ finally:
+ shutil.rmtree(td)
+
+ def test_put_w_tombstone(self):
+ assert not os.path.exists("/tmp/foo")
+ gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf.metadata == {}
+
+ gdf.put_metadata({'x': '1'}, tombstone=True)
+ assert gdf.metadata == {}
+
+ def test_put_w_meta_file(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ newmd = gdf.metadata.copy()
+ newmd['X-Object-Meta-test'] = '1234'
+ gdf.put_metadata(newmd)
+ assert gdf.metadata == newmd
+ assert _metadata[the_file] == newmd
+ finally:
+ shutil.rmtree(td)
+
+ def test_put_w_meta_file_no_content_type(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ newmd = gdf.metadata.copy()
+ newmd['Content-Type'] = ''
+ newmd['X-Object-Meta-test'] = '1234'
+ gdf.put_metadata(newmd)
+ assert gdf.metadata == newmd
+ assert _metadata[the_file] == newmd
+ finally:
+ shutil.rmtree(td)
+
+ def test_put_w_meta_dir(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "dir")
+ try:
+ os.makedirs(the_dir)
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "dir", self.lg)
+ newmd = gdf.metadata.copy()
+ newmd['X-Object-Meta-test'] = '1234'
+ gdf.put_metadata(newmd)
+ assert gdf.metadata == newmd
+ assert _metadata[the_dir] == newmd
+ finally:
+ shutil.rmtree(td)
+
+ def test_put_w_marker_dir(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "dir")
+ try:
+ os.makedirs(the_dir)
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "dir", self.lg)
+ newmd = gdf.metadata.copy()
+ newmd['X-Object-Meta-test'] = '1234'
+ gdf.put_metadata(newmd)
+ assert gdf.metadata == newmd
+ assert _metadata[the_dir] == newmd
+ finally:
+ shutil.rmtree(td)
+
+ def test_put_w_marker_dir_create(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "dir")
+ try:
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "dir", self.lg)
+ assert gdf.metadata == {}
+ newmd = {
+ 'Content-Length': 0,
+ 'ETag': 'etag',
+ 'X-Timestamp': 'ts',
+ 'Content-Type': 'application/directory'}
+ gdf.put(None, newmd, extension='.dir')
+ assert gdf.data_file == the_dir
+ assert gdf.metadata == newmd
+ assert _metadata[the_dir] == newmd
+ finally:
+ shutil.rmtree(td)
+
+ def test_put_is_dir(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "dir")
+ try:
+ os.makedirs(the_dir)
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "dir", self.lg)
+ origmd = gdf.metadata.copy()
+ origfmd = _metadata[the_dir]
+ newmd = gdf.metadata.copy()
+ # FIXME: This is a hack to get to the code-path; it is not clear
+ # how this can happen normally.
+ newmd['Content-Type'] = ''
+ newmd['X-Object-Meta-test'] = '1234'
+ try:
+ gdf.put(None, newmd, extension='.data')
+ except AlreadyExistsAsDir:
+ pass
+ else:
+ self.fail("Expected to encounter 'already-exists-as-dir' exception")
+ assert gdf.metadata == origmd
+ assert _metadata[the_dir] == origfmd
+ finally:
+ shutil.rmtree(td)
+
+ def test_put(self):
+ td = tempfile.mkdtemp()
+ try:
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf._obj_path == ""
+ assert gdf.name == "bar"
+ assert gdf.datadir == os.path.join(td, "vol0", "bar")
+ assert gdf.data_file is None
+
+ body = '1234\n'
+ etag = md5()
+ etag.update(body)
+ etag = etag.hexdigest()
+ metadata = {
+ 'X-Timestamp': '1234',
+ 'Content-Type': 'file',
+ 'ETag': etag,
+ 'Content-Length': '5',
+ }
+
+ with gdf.mkstemp() as fd:
+ assert gdf.tmppath is not None
+ tmppath = gdf.tmppath
+ os.write(fd, body)
+ gdf.put(fd, metadata)
+
+ assert gdf.data_file == os.path.join(td, "vol0", "bar", "z")
+ assert os.path.exists(gdf.data_file)
+ assert not os.path.exists(tmppath)
+ finally:
+ shutil.rmtree(td)
+
+ def test_put_obj_path(self):
+ the_obj_path = os.path.join("b", "a")
+ the_file = os.path.join(the_obj_path, "z")
+ td = tempfile.mkdtemp()
+ try:
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ the_file, self.lg)
+ assert gdf._obj == "z"
+ assert gdf._obj_path == the_obj_path
+ assert gdf.name == os.path.join("bar", "b", "a")
+ assert gdf.datadir == os.path.join(td, "vol0", "bar", "b", "a")
+ assert gdf.data_file is None
+
+ body = '1234\n'
+ etag = md5()
+ etag.update(body)
+ etag = etag.hexdigest()
+ metadata = {
+ 'X-Timestamp': '1234',
+ 'Content-Type': 'file',
+ 'ETag': etag,
+ 'Content-Length': '5',
+ }
+
+ with gdf.mkstemp() as fd:
+ assert gdf.tmppath is not None
+ tmppath = gdf.tmppath
+ os.write(fd, body)
+ gdf.put(fd, metadata)
+
+ assert gdf.data_file == os.path.join(td, "vol0", "bar", "b", "a", "z")
+ assert os.path.exists(gdf.data_file)
+ assert not os.path.exists(tmppath)
+ finally:
+ shutil.rmtree(td)
+
+ def test_unlinkold_no_metadata(self):
+ assert not os.path.exists("/tmp/foo")
+ gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf.metadata == {}
+ _saved_rmdirs = gluster.swift.common.DiskFile.rmdirs
+ _saved_do_listdir = gluster.swift.common.DiskFile.do_listdir
+ gluster.swift.common.DiskFile.rmdirs = _mock_rmdirs
+ gluster.swift.common.DiskFile.do_listdir = _mock_do_listdir
+ try:
+ gdf.unlinkold(None)
+ except MockException as exp:
+ self.fail(str(exp))
+ finally:
+ gluster.swift.common.DiskFile.rmdirs = _saved_rmdirs
+ gluster.swift.common.DiskFile.do_listdir = _saved_do_listdir
+
+ def test_unlinkold_same_timestamp(self):
+ assert not os.path.exists("/tmp/foo")
+ gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf.metadata == {}
+ gdf.metadata['X-Timestamp'] = 1
+ _saved_rmdirs = gluster.swift.common.DiskFile.rmdirs
+ _saved_do_listdir = gluster.swift.common.DiskFile.do_listdir
+ gluster.swift.common.DiskFile.rmdirs = _mock_rmdirs
+ gluster.swift.common.DiskFile.do_listdir = _mock_do_listdir
+ try:
+ gdf.unlinkold(1)
+ except MockException as exp:
+ self.fail(str(exp))
+ finally:
+ gluster.swift.common.DiskFile.rmdirs = _saved_rmdirs
+ gluster.swift.common.DiskFile.do_listdir = _saved_do_listdir
+
+ def test_unlinkold_file(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf.data_file == the_file
+ assert not gdf._is_dir
+
+ later = float(gdf.metadata['X-Timestamp']) + 1
+ gdf.unlinkold(normalize_timestamp(later))
+ assert os.path.isdir(gdf.datadir)
+ assert not os.path.exists(os.path.join(gdf.datadir, gdf._obj))
+ finally:
+ shutil.rmtree(td)
+
+ def test_unlinkold_file_not_found(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf.data_file == the_file
+ assert not gdf._is_dir
+
+            # Handle the case where the file is not in the directory listing.
+ os.unlink(the_file)
+
+ later = float(gdf.metadata['X-Timestamp']) + 1
+ gdf.unlinkold(normalize_timestamp(later))
+ assert os.path.isdir(gdf.datadir)
+ assert not os.path.exists(os.path.join(gdf.datadir, gdf._obj))
+ finally:
+ shutil.rmtree(td)
+
+ def test_unlinkold_file_unlink_error(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf.data_file == the_file
+ assert not gdf._is_dir
+
+ later = float(gdf.metadata['X-Timestamp']) + 1
+
+ stats = os.stat(the_path)
+ os.chmod(the_path, stats.st_mode & (~stat.S_IWUSR))
+
+            # Handle the case where do_unlink() raises an OSError.
+ __os_unlink = os.unlink
+ os.unlink = _mock_do_unlink_eacces_err
+ try:
+ gdf.unlinkold(normalize_timestamp(later))
+ except OSError as e:
+ assert e.errno == errno.EACCES
+ else:
+                self.fail("Expected an OSError when unlinking the file")
+ finally:
+ os.unlink = __os_unlink
+ os.chmod(the_path, stats.st_mode)
+
+ assert os.path.isdir(gdf.datadir)
+ assert os.path.exists(os.path.join(gdf.datadir, gdf._obj))
+ finally:
+ shutil.rmtree(td)
+
+ def test_unlinkold_is_dir(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "d")
+ try:
+ os.makedirs(the_dir)
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "d", self.lg, keep_data_fp=True)
+ assert gdf.data_file == the_dir
+ assert gdf._is_dir
+
+ later = float(gdf.metadata['X-Timestamp']) + 1
+ gdf.unlinkold(normalize_timestamp(later))
+ assert os.path.isdir(gdf.datadir)
+ assert not os.path.exists(os.path.join(gdf.datadir, gdf._obj))
+ finally:
+ shutil.rmtree(td)
+
+ def test_unlinkold_is_dir_failure(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "d")
+ try:
+ os.makedirs(the_dir)
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "d", self.lg, keep_data_fp=True)
+ assert gdf.data_file == the_dir
+ assert gdf._is_dir
+
+ stats = os.stat(gdf.datadir)
+ os.chmod(gdf.datadir, 0)
+ __os_rmdir = os.rmdir
+ os.rmdir = _mock_do_rmdir_eacces_err
+ try:
+ later = float(gdf.metadata['X-Timestamp']) + 1
+ gdf.unlinkold(normalize_timestamp(later))
+ finally:
+ os.chmod(gdf.datadir, stats.st_mode)
+ os.rmdir = __os_rmdir
+ assert os.path.isdir(gdf.datadir)
+ assert os.path.isdir(gdf.data_file)
+ finally:
+ shutil.rmtree(td)
+
+ def test_get_data_file_size(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf.data_file == the_file
+ assert not gdf._is_dir
+ assert 4 == gdf.get_data_file_size()
+ finally:
+ shutil.rmtree(td)
+
+    def test_get_data_file_size_md_restored(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf.data_file == the_file
+ assert not gdf._is_dir
+ assert 4 == gdf.metadata['Content-Length']
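+            # Tamper with Content-Length; get_data_file_size() should restore
+            # it from the actual on-disk size.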
+ gdf.metadata['Content-Length'] = 3
+ assert 4 == gdf.get_data_file_size()
+ assert 4 == gdf.metadata['Content-Length']
+ finally:
+ shutil.rmtree(td)
+
+ def test_get_data_file_size_dne(self):
+ assert not os.path.exists("/tmp/foo")
+ gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
+ "/b/a/z/", self.lg)
+ try:
+ s = gdf.get_data_file_size()
+ except DiskFileNotExist:
+ pass
+ else:
+ self.fail("Expected DiskFileNotExist exception")
+
+ def test_get_data_file_size_dne_os_err(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf.data_file == the_file
+ assert not gdf._is_dir
+ gdf.data_file = gdf.data_file + ".dne"
+ try:
+ s = gdf.get_data_file_size()
+ except DiskFileNotExist:
+ pass
+ else:
+ self.fail("Expected DiskFileNotExist exception")
+ finally:
+ shutil.rmtree(td)
+
+ def test_get_data_file_size_os_err(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ try:
+ os.makedirs(the_path)
+ with open(the_file, "wb") as fd:
+ fd.write("1234")
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf._obj == "z"
+ assert gdf.data_file == the_file
+ assert not gdf._is_dir
+ stats = os.stat(the_path)
+ os.chmod(the_path, 0)
+ __os_path_getsize = os.path.getsize
+ os.path.getsize = _mock_getsize_eaccess_err
+ try:
+ s = gdf.get_data_file_size()
+ except OSError as err:
+ assert err.errno == errno.EACCES
+ else:
+ self.fail("Expected OSError exception")
+ finally:
+ os.path.getsize = __os_path_getsize
+ os.chmod(the_path, stats.st_mode)
+ finally:
+ shutil.rmtree(td)
+
+ def test_get_data_file_size_dir(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "d")
+ try:
+ os.makedirs(the_dir)
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "d", self.lg, keep_data_fp=True)
+ assert gdf._obj == "d"
+ assert gdf.data_file == the_dir
+ assert gdf._is_dir
+ assert 0 == gdf.get_data_file_size()
+ finally:
+ shutil.rmtree(td)
+
+ def test_filter_metadata(self):
+ assert not os.path.exists("/tmp/foo")
+ gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
+ "z", self.lg)
+ assert gdf.metadata == {}
+ gdf.filter_metadata()
+ assert gdf.metadata == {}
+
+ gdf.metadata[X_TYPE] = 'a'
+ gdf.metadata[X_OBJECT_TYPE] = 'b'
+ gdf.metadata['foobar'] = 'c'
+ gdf.filter_metadata()
+ assert X_TYPE not in gdf.metadata
+ assert X_OBJECT_TYPE not in gdf.metadata
+ assert 'foobar' in gdf.metadata
+
+ def test_mkstemp(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "dir")
+ try:
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "dir/z", self.lg)
+ saved_tmppath = ''
+ with gdf.mkstemp() as fd:
+ assert gdf.datadir == os.path.join(td, "vol0", "bar", "dir")
+ assert os.path.isdir(gdf.datadir)
+ saved_tmppath = gdf.tmppath
+ assert os.path.dirname(saved_tmppath) == gdf.datadir
+ assert os.path.basename(saved_tmppath)[:3] == '.z.'
+ assert os.path.exists(saved_tmppath)
+ os.write(fd, "123")
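+            # Exiting the with block without calling put() should discard the
+            # temporary file.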
+ assert not os.path.exists(saved_tmppath)
+ finally:
+ shutil.rmtree(td)
+
+ def test_mkstemp_err_on_close(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "dir")
+ try:
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "dir/z", self.lg)
+ saved_tmppath = ''
+ with gdf.mkstemp() as fd:
+ assert gdf.datadir == os.path.join(td, "vol0", "bar", "dir")
+ assert os.path.isdir(gdf.datadir)
+ saved_tmppath = gdf.tmppath
+ assert os.path.dirname(saved_tmppath) == gdf.datadir
+ assert os.path.basename(saved_tmppath)[:3] == '.z.'
+ assert os.path.exists(saved_tmppath)
+ os.write(fd, "123")
+            # At the end of the previous with block, close() is called on fd.
+            # Calling os.close() on the same fd again will raise an OSError,
+            # which we must catch.
+ try:
+ os.close(fd)
+ except OSError as err:
+ pass
+ else:
+ self.fail("Exception expected")
+ assert not os.path.exists(saved_tmppath)
+ finally:
+ shutil.rmtree(td)
+
+ def test_mkstemp_err_on_unlink(self):
+ td = tempfile.mkdtemp()
+ the_path = os.path.join(td, "vol0", "bar")
+ the_dir = os.path.join(the_path, "dir")
+ try:
+ gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
+ "dir/z", self.lg)
+ saved_tmppath = ''
+ with gdf.mkstemp() as fd:
+ assert gdf.datadir == os.path.join(td, "vol0", "bar", "dir")
+ assert os.path.isdir(gdf.datadir)
+ saved_tmppath = gdf.tmppath
+ assert os.path.dirname(saved_tmppath) == gdf.datadir
+ assert os.path.basename(saved_tmppath)[:3] == '.z.'
+ assert os.path.exists(saved_tmppath)
+ os.write(fd, "123")
+ os.unlink(saved_tmppath)
+ assert not os.path.exists(saved_tmppath)
+ finally:
+ shutil.rmtree(td)
diff --git a/ufo/test/unit/common/test_fs_utils.py b/ufo/test/unit/common/test_fs_utils.py
new file mode 100644
index 0000000..186e07d
--- /dev/null
+++ b/ufo/test/unit/common/test_fs_utils.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+import random
+import unittest
+from tempfile import mkdtemp, mkstemp
+from gluster.swift.common import fs_utils as fs
+from gluster.swift.common.exceptions import NotDirectoryError, \
+ FileOrDirNotFoundError
+
+class TestUtils(unittest.TestCase):
+    """ Tests for common.fs_utils """
+
+ def test_do_walk(self):
+ try:
+ # create directory structure
+ tmpparent = mkdtemp()
+ tmpdirs = []
+ tmpfiles = []
+ for i in range(5):
+ tmpdirs.append(mkdtemp(dir=tmpparent).rsplit(os.path.sep, 1)[1])
+ tmpfiles.append(mkstemp(dir=tmpparent)[1].rsplit(os.path.sep, \
+ 1)[1])
+
+ for path, dirnames, filenames in fs.do_walk(tmpparent):
+ assert path == tmpparent
+                assert sorted(dirnames) == sorted(tmpdirs)
+                assert sorted(filenames) == sorted(tmpfiles)
+ break
+ finally:
+ shutil.rmtree(tmpparent)
+
+ def test_do_open(self):
+ try:
+ fd, tmpfile = mkstemp()
+ f = fs.do_open(tmpfile, 'r')
+ try:
+ f.write('test')
+ except IOError as err:
+ pass
+ else:
+ self.fail("IOError expected")
+ finally:
+ f.close()
+ os.close(fd)
+ os.remove(tmpfile)
+
+ def test_do_open_err(self):
+ try:
+ fs.do_open(os.path.join('/tmp', str(random.random())), 'r')
+ except IOError:
+ pass
+ else:
+ self.fail("IOError expected")
+
+ def test_do_write(self):
+ try:
+ fd, tmpfile = mkstemp()
+ cnt = fs.do_write(fd, "test")
+ assert cnt == len("test")
+ finally:
+ os.close(fd)
+ os.remove(tmpfile)
+
+ def test_do_write_err(self):
+ try:
+ fd, tmpfile = mkstemp()
+ fd1 = os.open(tmpfile, os.O_RDONLY)
+ fs.do_write(fd1, "test")
+ except OSError:
+ pass
+ else:
+ self.fail("OSError expected")
+        finally:
+            os.close(fd)
+            os.close(fd1)
+            os.remove(tmpfile)
+
+ def test_do_mkdir(self):
+ try:
+ path = os.path.join('/tmp', str(random.random()))
+ fs.do_mkdir(path)
+ assert os.path.exists(path)
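+            # A second do_mkdir on an existing path is expected to succeed.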
+ assert fs.do_mkdir(path)
+ finally:
+ os.rmdir(path)
+
+ def test_do_mkdir_err(self):
+ try:
+ path = os.path.join('/tmp', str(random.random()), str(random.random()))
+ fs.do_mkdir(path)
+ except OSError:
+ pass
+ else:
+ self.fail("OSError expected")
+
+ def test_do_makedirs(self):
+ try:
+ subdir = os.path.join('/tmp', str(random.random()))
+ path = os.path.join(subdir, str(random.random()))
+ fs.do_makedirs(path)
+ assert os.path.exists(path)
+ assert fs.do_makedirs(path)
+ finally:
+ shutil.rmtree(subdir)
+
+ def test_do_listdir(self):
+ try:
+ tmpdir = mkdtemp()
+ subdir = []
+ for i in range(5):
+ subdir.append(mkdtemp(dir=tmpdir).rsplit(os.path.sep, 1)[1])
+
+            assert sorted(subdir) == sorted(fs.do_listdir(tmpdir))
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def test_do_listdir_err(self):
+ try:
+ path = os.path.join('/tmp', str(random.random()))
+ fs.do_listdir(path)
+ except OSError:
+ pass
+ else:
+ self.fail("OSError expected")
+
+ def test_do_stat(self):
+ try:
+ tmpdir = mkdtemp()
+ fd, tmpfile = mkstemp(dir=tmpdir)
+ buf1 = os.stat(tmpfile)
+ buf2 = fs.do_stat(fd)
+ buf3 = fs.do_stat(tmpfile)
+
+ assert buf1 == buf2
+ assert buf1 == buf3
+ finally:
+ os.close(fd)
+ os.remove(tmpfile)
+ os.rmdir(tmpdir)
+
+ def test_do_stat_err(self):
+ try:
+ fs.do_stat(os.path.join('/tmp', str(random.random())))
+ except OSError:
+ pass
+ else:
+ self.fail("OSError expected")
+
+ def test_do_close(self):
+ try:
+ fd, tmpfile = mkstemp()
+            fs.do_close(fd)
+ try:
+ os.write(fd, "test")
+ except OSError:
+ pass
+ else:
+ self.fail("OSError expected")
+ fp = open(tmpfile)
+ fs.do_close(fp)
+ finally:
+ os.remove(tmpfile)
+
+ def test_do_unlink(self):
+ try:
+ fd, tmpfile = mkstemp()
+ fs.do_unlink(tmpfile)
+ assert not os.path.exists(tmpfile)
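+            # Unlinking a nonexistent path is expected to succeed, not raise.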
+ assert fs.do_unlink(os.path.join('/tmp', str(random.random())))
+ finally:
+ os.close(fd)
+
+ def test_do_unlink_err(self):
+ try:
+ tmpdir = mkdtemp()
+ fs.do_unlink(tmpdir)
+ except OSError:
+ pass
+ else:
+ self.fail('OSError expected')
+ finally:
+ os.rmdir(tmpdir)
+
+ def test_do_rmdir(self):
+ tmpdir = mkdtemp()
+ fs.do_rmdir(tmpdir)
+ assert not os.path.exists(tmpdir)
+ assert not fs.do_rmdir(os.path.join('/tmp', str(random.random())))
+
+ def test_do_rmdir_err(self):
+ try:
+ fd, tmpfile = mkstemp()
+ fs.do_rmdir(tmpfile)
+ except OSError:
+ pass
+ else:
+ self.fail('OSError expected')
+ finally:
+ os.close(fd)
+ os.remove(tmpfile)
+
+ def test_do_rename(self):
+ try:
+ srcpath = mkdtemp()
+ destpath = os.path.join('/tmp', str(random.random()))
+ fs.do_rename(srcpath, destpath)
+ assert not os.path.exists(srcpath)
+ assert os.path.exists(destpath)
+ finally:
+ os.rmdir(destpath)
+
+ def test_do_rename_err(self):
+ try:
+ srcpath = os.path.join('/tmp', str(random.random()))
+ destpath = os.path.join('/tmp', str(random.random()))
+ fs.do_rename(srcpath, destpath)
+ except OSError:
+ pass
+ else:
+ self.fail("OSError expected")
+
+ def test_dir_empty(self):
+ try:
+ tmpdir = mkdtemp()
+ subdir = mkdtemp(dir=tmpdir)
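+            # tmpdir now contains subdir, so only subdir should report empty.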
+ assert not fs.dir_empty(tmpdir)
+ assert fs.dir_empty(subdir)
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def test_dir_empty_err(self):
+ try:
+ try:
+ assert fs.dir_empty(os.path.join('/tmp', str(random.random())))
+ except FileOrDirNotFoundError:
+ pass
+ else:
+ self.fail("FileOrDirNotFoundError exception expected")
+
+ fd, tmpfile = mkstemp()
+ try:
+ fs.dir_empty(tmpfile)
+ except NotDirectoryError:
+ pass
+ else:
+ self.fail("NotDirectoryError exception expected")
+ finally:
+ os.close(fd)
+ os.unlink(tmpfile)
+
+ def test_rmdirs(self):
+ try:
+ tmpdir = mkdtemp()
+ subdir = mkdtemp(dir=tmpdir)
+ fd, tmpfile = mkstemp(dir=tmpdir)
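+            # rmdirs should refuse a plain file and a non-empty directory,
+            # and remove only the empty subdir.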
+ assert not fs.rmdirs(tmpfile)
+ assert not fs.rmdirs(tmpdir)
+ assert fs.rmdirs(subdir)
+ assert not os.path.exists(subdir)
+ finally:
+ os.close(fd)
+ shutil.rmtree(tmpdir)
diff --git a/ufo/test/unit/common/test_ring.py b/ufo/test/unit/common/test_ring.py
new file mode 100644
index 0000000..8b7509c
--- /dev/null
+++ b/ufo/test/unit/common/test_ring.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import unittest
+import gluster.swift.common.constraints
+import swift.common.utils
+from gluster.swift.common.ring import Ring
+
+
+class TestRing(unittest.TestCase):
+    """ Tests for common.ring """
+
+ def setUp(self):
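+        # Ring lookups hash object names, so a hash path suffix is required.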
+ swift.common.utils.HASH_PATH_SUFFIX = 'endcap'
+ swiftdir = os.path.join(os.getcwd(), "common", "data")
+ self.ring = Ring(swiftdir, ring_name='object')
+
+ def test_first_device(self):
+ part, node = self.ring.get_nodes('test')
+ assert node[0]['device'] == 'test'
+ node = self.ring.get_part_nodes(0)
+ assert node[0]['device'] == 'test'
+ for node in self.ring.get_more_nodes(0):
+ assert node['device'] == 'volume_not_in_ring'
+
+ def test_invalid_device(self):
+ part, node = self.ring.get_nodes('test2')
+ assert node[0]['device'] == 'volume_not_in_ring'
+ node = self.ring.get_part_nodes(0)
+ assert node[0]['device'] == 'volume_not_in_ring'
+
+ def test_second_device(self):
+ part, node = self.ring.get_nodes('iops')
+ assert node[0]['device'] == 'iops'
+ node = self.ring.get_part_nodes(0)
+ assert node[0]['device'] == 'iops'
+ for node in self.ring.get_more_nodes(0):
+ assert node['device'] == 'volume_not_in_ring'
+
+ def test_second_device_with_reseller_prefix(self):
+ part, node = self.ring.get_nodes('AUTH_iops')
+ assert node[0]['device'] == 'iops'
diff --git a/ufo/test/unit/common/test_utils.py b/ufo/test/unit/common/test_utils.py
new file mode 100644
index 0000000..c645509
--- /dev/null
+++ b/ufo/test/unit/common/test_utils.py
@@ -0,0 +1,1020 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Tests for common.utils """
+
+import os
+import unittest
+import errno
+import xattr
+import cPickle as pickle
+import tempfile
+import hashlib
+import tarfile
+import shutil
+from collections import defaultdict
+from swift.common.utils import normalize_timestamp
+from gluster.swift.common import utils, Glusterfs
+
+#
+# Somewhat hacky way of emulating the operation of xattr calls. They are made
+# against a dictionary that stores the xattr key/value pairs.
+#
+_xattrs = {}
+_xattr_op_cnt = defaultdict(int)
+_xattr_set_err = {}
+_xattr_get_err = {}
+_xattr_rem_err = {}
+
+def _xkey(path, key):
+ return "%s:%s" % (path, key)
+
+def _setxattr(path, key, value, *args, **kwargs):
+ _xattr_op_cnt['set'] += 1
+ xkey = _xkey(path, key)
+ if xkey in _xattr_set_err:
+ e = IOError()
+ e.errno = _xattr_set_err[xkey]
+ raise e
+ global _xattrs
+ _xattrs[xkey] = value
+
+def _getxattr(path, key, *args, **kwargs):
+ _xattr_op_cnt['get'] += 1
+ xkey = _xkey(path, key)
+ if xkey in _xattr_get_err:
+ e = IOError()
+ e.errno = _xattr_get_err[xkey]
+ raise e
+ global _xattrs
+ if xkey in _xattrs:
+ ret_val = _xattrs[xkey]
+ else:
+ e = IOError("Fake IOError")
+ e.errno = errno.ENODATA
+ raise e
+ return ret_val
+
+def _removexattr(path, key, *args, **kwargs):
+ _xattr_op_cnt['remove'] += 1
+ xkey = _xkey(path, key)
+ if xkey in _xattr_rem_err:
+ e = IOError()
+ e.errno = _xattr_rem_err[xkey]
+ raise e
+ global _xattrs
+ if xkey in _xattrs:
+ del _xattrs[xkey]
+ else:
+ e = IOError("Fake IOError")
+ e.errno = errno.ENODATA
+ raise e
+
+def _initxattr():
+ global _xattrs
+ _xattrs = {}
+ global _xattr_op_cnt
+ _xattr_op_cnt = defaultdict(int)
+ global _xattr_set_err, _xattr_get_err, _xattr_rem_err
+ _xattr_set_err = {}
+ _xattr_get_err = {}
+ _xattr_rem_err = {}
+
+ # Save the current methods
+ global _xattr_set; _xattr_set = xattr.setxattr
+ global _xattr_get; _xattr_get = xattr.getxattr
+ global _xattr_remove; _xattr_remove = xattr.removexattr
+
+ # Monkey patch the calls we use with our internal unit test versions
+ xattr.setxattr = _setxattr
+ xattr.getxattr = _getxattr
+ xattr.removexattr = _removexattr
+
+def _destroyxattr():
+ # Restore the current methods just in case
+ global _xattr_set; xattr.setxattr = _xattr_set
+ global _xattr_get; xattr.getxattr = _xattr_get
+ global _xattr_remove; xattr.removexattr = _xattr_remove
+    # Destroy the stored values.
+ global _xattrs; _xattrs = None
+
+
+class SimMemcache(object):
+ def __init__(self):
+ self._d = {}
+
+ def get(self, key):
+ return self._d.get(key, None)
+
+ def set(self, key, value):
+ self._d[key] = value
+
+
+class TestUtils(unittest.TestCase):
+ """ Tests for common.utils """
+
+ def setUp(self):
+ _initxattr()
+
+ def tearDown(self):
+ _destroyxattr()
+
+ def test_write_metadata(self):
+ path = "/tmp/foo/w"
+ orig_d = { 'bar' : 'foo' }
+ utils.write_metadata(path, orig_d)
+ xkey = _xkey(path, utils.METADATA_KEY)
+ assert len(_xattrs.keys()) == 1
+ assert xkey in _xattrs
+ assert orig_d == pickle.loads(_xattrs[xkey])
+ assert _xattr_op_cnt['set'] == 1
+
+ def test_write_metadata_err(self):
+ path = "/tmp/foo/w"
+ orig_d = { 'bar' : 'foo' }
+ xkey = _xkey(path, utils.METADATA_KEY)
+ _xattr_set_err[xkey] = errno.EOPNOTSUPP
+ try:
+ utils.write_metadata(path, orig_d)
+ except IOError as e:
+ assert e.errno == errno.EOPNOTSUPP
+ assert len(_xattrs.keys()) == 0
+ assert _xattr_op_cnt['set'] == 1
+ else:
+ self.fail("Expected an IOError exception on write")
+
+ def test_write_metadata_multiple(self):
+        # At 64 KB per xattr key/value pair, this should generate three keys.
+ path = "/tmp/foo/w"
+ orig_d = { 'bar' : 'x' * 150000 }
+ utils.write_metadata(path, orig_d)
+ assert len(_xattrs.keys()) == 3, "Expected 3 keys, found %d" % len(_xattrs.keys())
+ payload = ''
+ for i in range(0,3):
+ xkey = _xkey(path, "%s%s" % (utils.METADATA_KEY, i or ''))
+ assert xkey in _xattrs
+ assert len(_xattrs[xkey]) <= utils.MAX_XATTR_SIZE
+ payload += _xattrs[xkey]
+ assert orig_d == pickle.loads(payload)
+ assert _xattr_op_cnt['set'] == 3, "%r" % _xattr_op_cnt
+
+ def test_clean_metadata(self):
+ path = "/tmp/foo/c"
+ expected_d = { 'a': 'y' * 150000 }
+ expected_p = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
+ for i in range(0,3):
+ xkey = _xkey(path, "%s%s" % (utils.METADATA_KEY, i or ''))
+ _xattrs[xkey] = expected_p[:utils.MAX_XATTR_SIZE]
+ expected_p = expected_p[utils.MAX_XATTR_SIZE:]
+ assert not expected_p
+ utils.clean_metadata(path)
+ assert _xattr_op_cnt['remove'] == 4, "%r" % _xattr_op_cnt
+
+ def test_clean_metadata_err(self):
+ path = "/tmp/foo/c"
+ xkey = _xkey(path, utils.METADATA_KEY)
+ _xattrs[xkey] = pickle.dumps({ 'a': 'y' }, utils.PICKLE_PROTOCOL)
+ _xattr_rem_err[xkey] = errno.EOPNOTSUPP
+ try:
+ utils.clean_metadata(path)
+ except IOError as e:
+ assert e.errno == errno.EOPNOTSUPP
+ assert _xattr_op_cnt['remove'] == 1, "%r" % _xattr_op_cnt
+ else:
+ self.fail("Expected an IOError exception on remove")
+
+ def test_read_metadata(self):
+ path = "/tmp/foo/r"
+ expected_d = { 'a': 'y' }
+ xkey = _xkey(path, utils.METADATA_KEY)
+ _xattrs[xkey] = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
+ res_d = utils.read_metadata(path)
+ assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
+ assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
+
+ def test_read_metadata_notfound(self):
+ path = "/tmp/foo/r"
+ res_d = utils.read_metadata(path)
+ assert res_d == {}
+ assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
+
+ def test_read_metadata_err(self):
+ path = "/tmp/foo/r"
+ expected_d = { 'a': 'y' }
+ xkey = _xkey(path, utils.METADATA_KEY)
+ _xattrs[xkey] = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
+ _xattr_get_err[xkey] = errno.EOPNOTSUPP
+ try:
+ res_d = utils.read_metadata(path)
+ except IOError as e:
+ assert e.errno == errno.EOPNOTSUPP
+ assert (_xattr_op_cnt['get'] == 1), "%r" % _xattr_op_cnt
+ else:
+ self.fail("Expected an IOError exception on get")
+
+ def test_read_metadata_multiple(self):
+ path = "/tmp/foo/r"
+ expected_d = { 'a': 'y' * 150000 }
+ expected_p = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
+ for i in range(0,3):
+ xkey = _xkey(path, "%s%s" % (utils.METADATA_KEY, i or ''))
+ _xattrs[xkey] = expected_p[:utils.MAX_XATTR_SIZE]
+ expected_p = expected_p[utils.MAX_XATTR_SIZE:]
+ assert not expected_p
+ res_d = utils.read_metadata(path)
+ assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
+ assert _xattr_op_cnt['get'] == 3, "%r" % _xattr_op_cnt
+
+ def test_read_metadata_multiple_one_missing(self):
+ path = "/tmp/foo/r"
+ expected_d = { 'a': 'y' * 150000 }
+ expected_p = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
+ for i in range(0,2):
+ xkey = _xkey(path, "%s%s" % (utils.METADATA_KEY, i or ''))
+ _xattrs[xkey] = expected_p[:utils.MAX_XATTR_SIZE]
+ expected_p = expected_p[utils.MAX_XATTR_SIZE:]
+ assert len(expected_p) <= utils.MAX_XATTR_SIZE
+ res_d = utils.read_metadata(path)
+ assert res_d == {}
+ assert _xattr_op_cnt['get'] == 3, "%r" % _xattr_op_cnt
+ assert len(_xattrs.keys()) == 0, "Expected 0 keys, found %d" % len(_xattrs.keys())
+
+ def test_restore_metadata_none(self):
+ # No initial metadata
+ path = "/tmp/foo/i"
+ res_d = utils.restore_metadata(path, { 'b': 'y' })
+ expected_d = { 'b': 'y' }
+ assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
+ assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
+ assert _xattr_op_cnt['set'] == 1, "%r" % _xattr_op_cnt
+
+ def test_restore_metadata(self):
+ # Initial metadata
+ path = "/tmp/foo/i"
+ initial_d = { 'a': 'z' }
+ xkey = _xkey(path, utils.METADATA_KEY)
+ _xattrs[xkey] = pickle.dumps(initial_d, utils.PICKLE_PROTOCOL)
+ res_d = utils.restore_metadata(path, { 'b': 'y' })
+ expected_d = { 'a': 'z', 'b': 'y' }
+ assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
+ assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
+ assert _xattr_op_cnt['set'] == 1, "%r" % _xattr_op_cnt
+
+ def test_restore_metadata_nochange(self):
+ # Initial metadata but no changes
+ path = "/tmp/foo/i"
+ initial_d = { 'a': 'z' }
+ xkey = _xkey(path, utils.METADATA_KEY)
+ _xattrs[xkey] = pickle.dumps(initial_d, utils.PICKLE_PROTOCOL)
+ res_d = utils.restore_metadata(path, {})
+ expected_d = { 'a': 'z' }
+ assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
+ assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
+ assert _xattr_op_cnt['set'] == 0, "%r" % _xattr_op_cnt
+
+ def test_add_timestamp_empty(self):
+ orig = {}
+ res = utils._add_timestamp(orig)
+ assert res == {}
+
+ def test_add_timestamp_none(self):
+ orig = { 'a': 1, 'b': 2, 'c': 3 }
+ exp = { 'a': (1, 0), 'b': (2, 0), 'c': (3, 0) }
+ res = utils._add_timestamp(orig)
+ assert res == exp
+
+ def test_add_timestamp_mixed(self):
+ orig = { 'a': 1, 'b': (2, 1), 'c': 3 }
+ exp = { 'a': (1, 0), 'b': (2, 1), 'c': (3, 0) }
+ res = utils._add_timestamp(orig)
+ assert res == exp
+
+ def test_add_timestamp_all(self):
+ orig = { 'a': (1, 0), 'b': (2, 1), 'c': (3, 0) }
+ res = utils._add_timestamp(orig)
+ assert res == orig
+
+ def test_get_etag_empty(self):
+ tf = tempfile.NamedTemporaryFile()
+ hd = utils._get_etag(tf.name)
+ assert hd == hashlib.md5().hexdigest()
+
+ def test_get_etag(self):
+ tf = tempfile.NamedTemporaryFile()
+ tf.file.write('123' * utils.CHUNK_SIZE)
+ tf.file.flush()
+ hd = utils._get_etag(tf.name)
+ tf.file.seek(0)
+ md5 = hashlib.md5()
+ while True:
+ chunk = tf.file.read(utils.CHUNK_SIZE)
+ if not chunk:
+ break
+ md5.update(chunk)
+ assert hd == md5.hexdigest()
+
+ def test_get_object_metadata_dne(self):
+ md = utils.get_object_metadata("/tmp/doesNotEx1st")
+ assert md == {}
+
+ def test_get_object_metadata_err(self):
+ tf = tempfile.NamedTemporaryFile()
+ try:
+            md = utils.get_object_metadata(os.path.join(tf.name, "doesNotEx1st"))
+ except OSError as e:
+ assert e.errno != errno.ENOENT
+ else:
+ self.fail("Expected exception")
+
+ obj_keys = (utils.X_TIMESTAMP, utils.X_CONTENT_TYPE, utils.X_ETAG,
+ utils.X_CONTENT_LENGTH, utils.X_TYPE, utils.X_OBJECT_TYPE)
+
+ def test_get_object_metadata_file(self):
+ tf = tempfile.NamedTemporaryFile()
+        tf.file.write('123')
+        tf.file.flush()
+ md = utils.get_object_metadata(tf.name)
+ for key in self.obj_keys:
+ assert key in md, "Expected key %s in %r" % (key, md)
+ assert md[utils.X_TYPE] == utils.OBJECT
+ assert md[utils.X_OBJECT_TYPE] == utils.FILE
+ assert md[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
+ assert md[utils.X_CONTENT_LENGTH] == os.path.getsize(tf.name)
+ assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(tf.name))
+ assert md[utils.X_ETAG] == utils._get_etag(tf.name)
+
+ def test_get_object_metadata_dir(self):
+ td = tempfile.mkdtemp()
+ try:
+ md = utils.get_object_metadata(td)
+ for key in self.obj_keys:
+ assert key in md, "Expected key %s in %r" % (key, md)
+ assert md[utils.X_TYPE] == utils.OBJECT
+ assert md[utils.X_OBJECT_TYPE] == utils.DIR
+ assert md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
+ assert md[utils.X_CONTENT_LENGTH] == 0
+ assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(td))
+ assert md[utils.X_ETAG] == hashlib.md5().hexdigest()
+ finally:
+ os.rmdir(td)
+
+ def test_create_object_metadata_file(self):
+ tf = tempfile.NamedTemporaryFile()
+        tf.file.write('4567')
+        tf.file.flush()
+ r_md = utils.create_object_metadata(tf.name)
+
+ xkey = _xkey(tf.name, utils.METADATA_KEY)
+ assert len(_xattrs.keys()) == 1
+ assert xkey in _xattrs
+ assert _xattr_op_cnt['get'] == 1
+ assert _xattr_op_cnt['set'] == 1
+ md = pickle.loads(_xattrs[xkey])
+ assert r_md == md
+
+ for key in self.obj_keys:
+ assert key in md, "Expected key %s in %r" % (key, md)
+ assert md[utils.X_TYPE] == utils.OBJECT
+ assert md[utils.X_OBJECT_TYPE] == utils.FILE
+ assert md[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
+ assert md[utils.X_CONTENT_LENGTH] == os.path.getsize(tf.name)
+ assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(tf.name))
+ assert md[utils.X_ETAG] == utils._get_etag(tf.name)
+
+ def test_create_object_metadata_dir(self):
+ td = tempfile.mkdtemp()
+ try:
+ r_md = utils.create_object_metadata(td)
+
+ xkey = _xkey(td, utils.METADATA_KEY)
+ assert len(_xattrs.keys()) == 1
+ assert xkey in _xattrs
+ assert _xattr_op_cnt['get'] == 1
+ assert _xattr_op_cnt['set'] == 1
+ md = pickle.loads(_xattrs[xkey])
+ assert r_md == md
+
+ for key in self.obj_keys:
+ assert key in md, "Expected key %s in %r" % (key, md)
+ assert md[utils.X_TYPE] == utils.OBJECT
+ assert md[utils.X_OBJECT_TYPE] == utils.DIR
+ assert md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
+ assert md[utils.X_CONTENT_LENGTH] == 0
+ assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(td))
+ assert md[utils.X_ETAG] == hashlib.md5().hexdigest()
+ finally:
+ os.rmdir(td)
+
+ def test_get_container_metadata(self):
+ def _mock_get_container_details(path, memcache=None):
+ o_list = [ 'a', 'b', 'c' ]
+ o_count = 3
+ b_used = 47
+ return o_list, o_count, b_used
+ orig_gcd = utils.get_container_details
+ utils.get_container_details = _mock_get_container_details
+ td = tempfile.mkdtemp()
+ try:
+ exp_md = {
+ utils.X_TYPE: (utils.CONTAINER, 0),
+ utils.X_TIMESTAMP: (normalize_timestamp(os.path.getctime(td)), 0),
+ utils.X_PUT_TIMESTAMP: (normalize_timestamp(os.path.getmtime(td)), 0),
+ utils.X_OBJECTS_COUNT: (3, 0),
+ utils.X_BYTES_USED: (47, 0),
+ }
+ md = utils.get_container_metadata(td)
+ assert md == exp_md
+ finally:
+ utils.get_container_details = orig_gcd
+ os.rmdir(td)
+
+ def test_get_account_metadata(self):
+ def _mock_get_account_details(path, memcache=None):
+ c_list = [ '123', 'abc' ]
+ c_count = 2
+ return c_list, c_count
+ orig_gad = utils.get_account_details
+ utils.get_account_details = _mock_get_account_details
+ td = tempfile.mkdtemp()
+ try:
+ exp_md = {
+ utils.X_TYPE: (utils.ACCOUNT, 0),
+ utils.X_TIMESTAMP: (normalize_timestamp(os.path.getctime(td)), 0),
+ utils.X_PUT_TIMESTAMP: (normalize_timestamp(os.path.getmtime(td)), 0),
+ utils.X_OBJECTS_COUNT: (0, 0),
+ utils.X_BYTES_USED: (0, 0),
+ utils.X_CONTAINER_COUNT: (2, 0),
+ }
+ md = utils.get_account_metadata(td)
+ assert md == exp_md
+ finally:
+ utils.get_account_details = orig_gad
+ os.rmdir(td)
+
+ cont_keys = [utils.X_TYPE, utils.X_TIMESTAMP, utils.X_PUT_TIMESTAMP,
+ utils.X_OBJECTS_COUNT, utils.X_BYTES_USED]
+
+ def test_create_container_metadata(self):
+ td = tempfile.mkdtemp()
+ try:
+ r_md = utils.create_container_metadata(td)
+
+ xkey = _xkey(td, utils.METADATA_KEY)
+ assert len(_xattrs.keys()) == 1
+ assert xkey in _xattrs
+ assert _xattr_op_cnt['get'] == 1
+ assert _xattr_op_cnt['set'] == 1
+ md = pickle.loads(_xattrs[xkey])
+ assert r_md == md
+
+ for key in self.cont_keys:
+ assert key in md, "Expected key %s in %r" % (key, md)
+ assert md[utils.X_TYPE] == (utils.CONTAINER, 0)
+ assert md[utils.X_TIMESTAMP] == (normalize_timestamp(os.path.getctime(td)), 0)
+ assert md[utils.X_PUT_TIMESTAMP] == (normalize_timestamp(os.path.getmtime(td)), 0)
+ assert md[utils.X_OBJECTS_COUNT] == (0, 0)
+ assert md[utils.X_BYTES_USED] == (0, 0)
+ finally:
+ os.rmdir(td)
+
+    acct_keys = cont_keys + [utils.X_CONTAINER_COUNT]
+
+ def test_create_account_metadata(self):
+ td = tempfile.mkdtemp()
+ try:
+ r_md = utils.create_account_metadata(td)
+
+ xkey = _xkey(td, utils.METADATA_KEY)
+ assert len(_xattrs.keys()) == 1
+ assert xkey in _xattrs
+ assert _xattr_op_cnt['get'] == 1
+ assert _xattr_op_cnt['set'] == 1
+ md = pickle.loads(_xattrs[xkey])
+ assert r_md == md
+
+ for key in self.acct_keys:
+ assert key in md, "Expected key %s in %r" % (key, md)
+ assert md[utils.X_TYPE] == (utils.ACCOUNT, 0)
+ assert md[utils.X_TIMESTAMP] == (normalize_timestamp(os.path.getctime(td)), 0)
+ assert md[utils.X_PUT_TIMESTAMP] == (normalize_timestamp(os.path.getmtime(td)), 0)
+ assert md[utils.X_OBJECTS_COUNT] == (0, 0)
+ assert md[utils.X_BYTES_USED] == (0, 0)
+ assert md[utils.X_CONTAINER_COUNT] == (0, 0)
+ finally:
+ os.rmdir(td)
+
+ def test_container_details_uncached(self):
+ the_path = "/tmp/bar"
+ def mock_get_container_details_from_fs(cont_path):
+ bu = 5
+ oc = 1
+ ol = ['foo',]
+ dl = [('a',100),]
+ return utils.ContainerDetails(bu, oc, ol, dl)
+ orig_gcdff = utils._get_container_details_from_fs
+ utils._get_container_details_from_fs = mock_get_container_details_from_fs
+ try:
+ retval = utils.get_container_details(the_path)
+ cd = mock_get_container_details_from_fs(the_path)
+ assert retval == (cd.obj_list, cd.object_count, cd.bytes_used)
+ finally:
+ utils._get_container_details_from_fs = orig_gcdff
+
+ def test_container_details_cached_hit(self):
+ mc = SimMemcache()
+ the_path = "/tmp/bar"
+ def mock_get_container_details_from_fs(cont_path, bu_p=5):
+ bu = bu_p
+ oc = 1
+ ol = ['foo',]
+ dl = [('a',100),]
+ return utils.ContainerDetails(bu, oc, ol, dl)
+ def mock_do_stat(path):
+ class MockStat(object):
+ def __init__(self, mtime):
+ self.st_mtime = mtime
+ return MockStat(100)
+ cd = mock_get_container_details_from_fs(the_path, bu_p=6)
+ mc.set(utils.MEMCACHE_CONTAINER_DETAILS_KEY_PREFIX + the_path, cd)
+ orig_gcdff = utils._get_container_details_from_fs
+ utils._get_container_details_from_fs = mock_get_container_details_from_fs
+ orig_ds = utils.do_stat
+ utils.do_stat = mock_do_stat
+ try:
+ retval = utils.get_container_details(the_path, memcache=mc)
+ # If it did not properly use memcache, the default mocked version
+ # of get details from fs would return 5 bytes used instead of the
+ # 6 we specified above.
+ cd = mock_get_container_details_from_fs(the_path, bu_p=6)
+ assert retval == (cd.obj_list, cd.object_count, cd.bytes_used)
+ finally:
+ utils._get_container_details_from_fs = orig_gcdff
+ utils.do_stat = orig_ds
+
+ def test_container_details_cached_miss_key(self):
+ mc = SimMemcache()
+ the_path = "/tmp/bar"
+ def mock_get_container_details_from_fs(cont_path, bu_p=5):
+ bu = bu_p
+ oc = 1
+ ol = ['foo',]
+ dl = [('a',100),]
+ return utils.ContainerDetails(bu, oc, ol, dl)
+ def mock_do_stat(path):
+ # Be sure we don't miss due to mtimes not matching
+ self.fail("do_stat should not have been called")
+ cd = mock_get_container_details_from_fs(the_path + "u", bu_p=6)
+ mc.set(utils.MEMCACHE_CONTAINER_DETAILS_KEY_PREFIX + the_path + "u", cd)
+ orig_gcdff = utils._get_container_details_from_fs
+ utils._get_container_details_from_fs = mock_get_container_details_from_fs
+ orig_ds = utils.do_stat
+ utils.do_stat = mock_do_stat
+ try:
+ retval = utils.get_container_details(the_path, memcache=mc)
+ cd = mock_get_container_details_from_fs(the_path)
+ assert retval == (cd.obj_list, cd.object_count, cd.bytes_used)
+ mkey = utils.MEMCACHE_CONTAINER_DETAILS_KEY_PREFIX + the_path
+ assert mkey in mc._d
+ finally:
+ utils._get_container_details_from_fs = orig_gcdff
+ utils.do_stat = orig_ds
+
+ def test_container_details_cached_miss_dir_list(self):
+ mc = SimMemcache()
+ the_path = "/tmp/bar"
+ def mock_get_container_details_from_fs(cont_path, bu_p=5):
+ bu = bu_p
+ oc = 1
+ ol = ['foo',]
+ dl = []
+ return utils.ContainerDetails(bu, oc, ol, dl)
+ def mock_do_stat(path):
+ # Be sure we don't miss due to mtimes not matching
+ self.fail("do_stat should not have been called")
+ cd = mock_get_container_details_from_fs(the_path, bu_p=6)
+ mc.set(utils.MEMCACHE_CONTAINER_DETAILS_KEY_PREFIX + the_path, cd)
+ orig_gcdff = utils._get_container_details_from_fs
+ utils._get_container_details_from_fs = mock_get_container_details_from_fs
+ orig_ds = utils.do_stat
+ utils.do_stat = mock_do_stat
+ try:
+ retval = utils.get_container_details(the_path, memcache=mc)
+ cd = mock_get_container_details_from_fs(the_path)
+ assert retval == (cd.obj_list, cd.object_count, cd.bytes_used)
+ mkey = utils.MEMCACHE_CONTAINER_DETAILS_KEY_PREFIX + the_path
+ assert mkey in mc._d
+ assert 5 == mc._d[mkey].bytes_used
+ finally:
+ utils._get_container_details_from_fs = orig_gcdff
+ utils.do_stat = orig_ds
+
+ def test_container_details_cached_miss_mtime(self):
+ mc = SimMemcache()
+ the_path = "/tmp/bar"
+ def mock_get_container_details_from_fs(cont_path, bu_p=5):
+ bu = bu_p
+ oc = 1
+ ol = ['foo',]
+ dl = [('a',100),]
+ return utils.ContainerDetails(bu, oc, ol, dl)
+ def mock_do_stat(path):
+ # Be sure we miss due to mtimes not matching
+ class MockStat(object):
+ def __init__(self, mtime):
+ self.st_mtime = mtime
+ return MockStat(200)
+ cd = mock_get_container_details_from_fs(the_path, bu_p=6)
+ mc.set(utils.MEMCACHE_CONTAINER_DETAILS_KEY_PREFIX + the_path, cd)
+ orig_gcdff = utils._get_container_details_from_fs
+ utils._get_container_details_from_fs = mock_get_container_details_from_fs
+ orig_ds = utils.do_stat
+ utils.do_stat = mock_do_stat
+ try:
+ retval = utils.get_container_details(the_path, memcache=mc)
+ cd = mock_get_container_details_from_fs(the_path)
+ assert retval == (cd.obj_list, cd.object_count, cd.bytes_used)
+ mkey = utils.MEMCACHE_CONTAINER_DETAILS_KEY_PREFIX + the_path
+ assert mkey in mc._d
+ assert 5 == mc._d[mkey].bytes_used
+ finally:
+ utils._get_container_details_from_fs = orig_gcdff
+ utils.do_stat = orig_ds
+
+ def test_account_details_uncached(self):
+ the_path = "/tmp/bar"
+ def mock_get_account_details_from_fs(acc_path, acc_stats):
+ mt = 100
+ cc = 2
+ cl = ['a', 'b']
+ return utils.AccountDetails(mt, cc, cl)
+ orig_gcdff = utils._get_account_details_from_fs
+ utils._get_account_details_from_fs = mock_get_account_details_from_fs
+ try:
+ retval = utils.get_account_details(the_path)
+ ad = mock_get_account_details_from_fs(the_path, None)
+ assert retval == (ad.container_list, ad.container_count)
+ finally:
+ utils._get_account_details_from_fs = orig_gcdff
+
+ def test_account_details_cached_hit(self):
+ mc = SimMemcache()
+ the_path = "/tmp/bar"
+ def mock_get_account_details_from_fs(acc_path, acc_stats):
+ mt = 100
+ cc = 2
+ cl = ['a', 'b']
+ return utils.AccountDetails(mt, cc, cl)
+ def mock_do_stat(path):
+ class MockStat(object):
+ def __init__(self, mtime):
+ self.st_mtime = mtime
+ return MockStat(100)
+ ad = mock_get_account_details_from_fs(the_path, None)
+ ad.container_list = ['x', 'y']
+ mc.set(utils.MEMCACHE_ACCOUNT_DETAILS_KEY_PREFIX + the_path, ad)
+ orig_gcdff = utils._get_account_details_from_fs
+ orig_ds = utils.do_stat
+ utils._get_account_details_from_fs = mock_get_account_details_from_fs
+ utils.do_stat = mock_do_stat
+ try:
+ retval = utils.get_account_details(the_path, memcache=mc)
+ assert retval == (ad.container_list, ad.container_count)
+ wrong_ad = mock_get_account_details_from_fs(the_path, None)
+ assert wrong_ad != ad
+ finally:
+ utils._get_account_details_from_fs = orig_gcdff
+ utils.do_stat = orig_ds
+
+ def test_account_details_cached_miss(self):
+ mc = SimMemcache()
+ the_path = "/tmp/bar"
+ def mock_get_account_details_from_fs(acc_path, acc_stats):
+ mt = 100
+ cc = 2
+ cl = ['a', 'b']
+ return utils.AccountDetails(mt, cc, cl)
+ def mock_do_stat(path):
+ class MockStat(object):
+ def __init__(self, mtime):
+ self.st_mtime = mtime
+ return MockStat(100)
+ ad = mock_get_account_details_from_fs(the_path, None)
+ ad.container_list = ['x', 'y']
+ mc.set(utils.MEMCACHE_ACCOUNT_DETAILS_KEY_PREFIX + the_path + 'u', ad)
+ orig_gcdff = utils._get_account_details_from_fs
+ orig_ds = utils.do_stat
+ utils._get_account_details_from_fs = mock_get_account_details_from_fs
+ utils.do_stat = mock_do_stat
+ try:
+ retval = utils.get_account_details(the_path, memcache=mc)
+ correct_ad = mock_get_account_details_from_fs(the_path, None)
+ assert retval == (correct_ad.container_list, correct_ad.container_count)
+ assert correct_ad != ad
+ finally:
+ utils._get_account_details_from_fs = orig_gcdff
+ utils.do_stat = orig_ds
+
+ def test_account_details_cached_miss_mtime(self):
+ mc = SimMemcache()
+ the_path = "/tmp/bar"
+ def mock_get_account_details_from_fs(acc_path, acc_stats):
+ mt = 100
+ cc = 2
+ cl = ['a', 'b']
+ return utils.AccountDetails(mt, cc, cl)
+ def mock_do_stat(path):
+ class MockStat(object):
+ def __init__(self, mtime):
+ self.st_mtime = mtime
+ return MockStat(100)
+ ad = mock_get_account_details_from_fs(the_path, None)
+ ad.container_list = ['x', 'y']
+ ad.mtime = 200
+ mc.set(utils.MEMCACHE_ACCOUNT_DETAILS_KEY_PREFIX + the_path, ad)
+ orig_gcdff = utils._get_account_details_from_fs
+ orig_ds = utils.do_stat
+ utils._get_account_details_from_fs = mock_get_account_details_from_fs
+ utils.do_stat = mock_do_stat
+ try:
+ retval = utils.get_account_details(the_path, memcache=mc)
+ correct_ad = mock_get_account_details_from_fs(the_path, None)
+ assert retval == (correct_ad.container_list, correct_ad.container_count)
+ assert correct_ad != ad
+ finally:
+ utils._get_account_details_from_fs = orig_gcdff
+ utils.do_stat = orig_ds
+
+ def test_get_account_details_from_fs(self):
+ orig_cwd = os.getcwd()
+ td = tempfile.mkdtemp()
+ try:
+ tf = tarfile.open("common/data/account_tree.tar.bz2", "r:bz2")
+ os.chdir(td)
+ tf.extractall()
+
+ ad = utils._get_account_details_from_fs(td, None)
+ assert ad.mtime == os.path.getmtime(td)
+ assert ad.container_count == 3
+ assert set(ad.container_list) == set(['c1', 'c2', 'c3'])
+ finally:
+ os.chdir(orig_cwd)
+ shutil.rmtree(td)
+
+ def test_get_container_details_from_fs_notadir(self):
+ tf = tempfile.NamedTemporaryFile()
+ cd = utils._get_container_details_from_fs(tf.name)
+ assert cd.bytes_used == 0
+ assert cd.object_count == 0
+ assert cd.obj_list == []
+ assert cd.dir_list == []
+
+ def test_get_container_details_from_fs(self):
+ orig_cwd = os.getcwd()
+ td = tempfile.mkdtemp()
+ try:
+ tf = tarfile.open("common/data/container_tree.tar.bz2", "r:bz2")
+ os.chdir(td)
+ tf.extractall()
+
+ cd = utils._get_container_details_from_fs(td)
+ assert cd.bytes_used == 0, repr(cd.bytes_used)
+ assert cd.object_count == 8, repr(cd.object_count)
+ assert set(cd.obj_list) == set(['file1', 'file3', 'file2',
+ 'dir3', 'dir1', 'dir2',
+ 'dir1/file1', 'dir1/file2'
+ ]), repr(cd.obj_list)
+
+ full_dir1 = os.path.join(td, 'dir1')
+ full_dir2 = os.path.join(td, 'dir2')
+ full_dir3 = os.path.join(td, 'dir3')
+ exp_dir_dict = { td: os.path.getmtime(td),
+ full_dir1: os.path.getmtime(full_dir1),
+ full_dir2: os.path.getmtime(full_dir2),
+ full_dir3: os.path.getmtime(full_dir3),
+ }
+ for d,m in cd.dir_list:
+ assert d in exp_dir_dict
+ assert exp_dir_dict[d] == m
+ finally:
+ os.chdir(orig_cwd)
+ shutil.rmtree(td)
+
+ def test_get_container_details_from_fs_do_getsize_true(self):
+ orig_cwd = os.getcwd()
+ td = tempfile.mkdtemp()
+ try:
+ tf = tarfile.open("common/data/container_tree.tar.bz2", "r:bz2")
+ os.chdir(td)
+ tf.extractall()
+
+ __do_getsize = Glusterfs._do_getsize
+ Glusterfs._do_getsize = True
+
+ cd = utils._get_container_details_from_fs(td)
+ assert cd.bytes_used == 30, repr(cd.bytes_used)
+ assert cd.object_count == 8, repr(cd.object_count)
+ assert set(cd.obj_list) == set(['file1', 'file3', 'file2',
+ 'dir3', 'dir1', 'dir2',
+ 'dir1/file1', 'dir1/file2'
+ ]), repr(cd.obj_list)
+
+ full_dir1 = os.path.join(td, 'dir1')
+ full_dir2 = os.path.join(td, 'dir2')
+ full_dir3 = os.path.join(td, 'dir3')
+ exp_dir_dict = { td: os.path.getmtime(td),
+ full_dir1: os.path.getmtime(full_dir1),
+ full_dir2: os.path.getmtime(full_dir2),
+ full_dir3: os.path.getmtime(full_dir3),
+ }
+ for d,m in cd.dir_list:
+ assert d in exp_dir_dict
+ assert exp_dir_dict[d] == m
+ finally:
+ Glusterfs._do_getsize = __do_getsize
+ os.chdir(orig_cwd)
+ shutil.rmtree(td)
+
+ def test_get_account_details_from_fs_notadir_w_stats(self):
+ tf = tempfile.NamedTemporaryFile()
+ ad = utils._get_account_details_from_fs(tf.name, os.stat(tf.name))
+ assert ad.mtime == os.path.getmtime(tf.name)
+ assert ad.container_count == 0
+ assert ad.container_list == []
+
+ def test_get_account_details_from_fs_notadir(self):
+ tf = tempfile.NamedTemporaryFile()
+ ad = utils._get_account_details_from_fs(tf.name, None)
+ assert ad.mtime == os.path.getmtime(tf.name)
+ assert ad.container_count == 0
+ assert ad.container_list == []
+
+ def test_write_pickle(self):
+ td = tempfile.mkdtemp()
+ try:
+ fpp = os.path.join(td, 'pp')
+ utils.write_pickle('pickled peppers', fpp)
+ with open(fpp, "rb") as f:
+ contents = f.read()
+ s = pickle.loads(contents)
+ assert s == 'pickled peppers', repr(s)
+ finally:
+ shutil.rmtree(td)
+
+ def test_write_pickle_ignore_tmp(self):
+ tf = tempfile.NamedTemporaryFile()
+ td = tempfile.mkdtemp()
+ try:
+ fpp = os.path.join(td, 'pp')
+            # Also test an explicit pickle protocol
+ utils.write_pickle('pickled peppers', fpp, tmp=tf.name, pickle_protocol=2)
+ with open(fpp, "rb") as f:
+ contents = f.read()
+ s = pickle.loads(contents)
+ assert s == 'pickled peppers', repr(s)
+ with open(tf.name, "rb") as f:
+ contents = f.read()
+ assert contents == ''
+ finally:
+ shutil.rmtree(td)
+
+ def test_check_user_xattr_bad_path(self):
+ assert False == utils.check_user_xattr("/tmp/foo/bar/check/user/xattr")
+
+ def test_check_user_xattr_bad_set(self):
+ td = tempfile.mkdtemp()
+ xkey = _xkey(td, 'user.test.key1')
+ _xattr_set_err[xkey] = errno.EOPNOTSUPP
+ try:
+ assert False == utils.check_user_xattr(td)
+ except IOError:
+ pass
+ else:
+ self.fail("Expected IOError")
+ finally:
+ shutil.rmtree(td)
+
+ def test_check_user_xattr_bad_remove(self):
+ td = tempfile.mkdtemp()
+ xkey = _xkey(td, 'user.test.key1')
+ _xattr_rem_err[xkey] = errno.EOPNOTSUPP
+ try:
+ utils.check_user_xattr(td)
+ except IOError:
+ self.fail("Unexpected IOError")
+ finally:
+ shutil.rmtree(td)
+
+ def test_check_user_xattr(self):
+ td = tempfile.mkdtemp()
+ try:
+ assert utils.check_user_xattr(td)
+ finally:
+ shutil.rmtree(td)
+
+ def test_validate_container_empty(self):
+ ret = utils.validate_container({})
+ assert ret == False
+
+ def test_validate_container_missing_keys(self):
+ ret = utils.validate_container({ 'foo': 'bar' })
+ assert ret == False
+
+ def test_validate_container_bad_type(self):
+ md = { utils.X_TYPE: ('bad', 0),
+ utils.X_TIMESTAMP: ('na', 0),
+ utils.X_PUT_TIMESTAMP: ('na', 0),
+ utils.X_OBJECTS_COUNT: ('na', 0),
+ utils.X_BYTES_USED: ('na', 0) }
+ ret = utils.validate_container(md)
+ assert ret == False
+
+ def test_validate_container_good_type(self):
+ md = { utils.X_TYPE: (utils.CONTAINER, 0),
+ utils.X_TIMESTAMP: ('na', 0),
+ utils.X_PUT_TIMESTAMP: ('na', 0),
+ utils.X_OBJECTS_COUNT: ('na', 0),
+ utils.X_BYTES_USED: ('na', 0) }
+ ret = utils.validate_container(md)
+ assert ret
+
+ def test_validate_account_empty(self):
+ ret = utils.validate_account({})
+ assert ret == False
+
+ def test_validate_account_missing_keys(self):
+ ret = utils.validate_account({ 'foo': 'bar' })
+ assert ret == False
+
+ def test_validate_account_bad_type(self):
+ md = { utils.X_TYPE: ('bad', 0),
+ utils.X_TIMESTAMP: ('na', 0),
+ utils.X_PUT_TIMESTAMP: ('na', 0),
+ utils.X_OBJECTS_COUNT: ('na', 0),
+ utils.X_BYTES_USED: ('na', 0),
+ utils.X_CONTAINER_COUNT: ('na', 0) }
+ ret = utils.validate_account(md)
+ assert ret == False
+
+ def test_validate_account_good_type(self):
+ md = { utils.X_TYPE: (utils.ACCOUNT, 0),
+ utils.X_TIMESTAMP: ('na', 0),
+ utils.X_PUT_TIMESTAMP: ('na', 0),
+ utils.X_OBJECTS_COUNT: ('na', 0),
+ utils.X_BYTES_USED: ('na', 0),
+ utils.X_CONTAINER_COUNT: ('na', 0) }
+ ret = utils.validate_account(md)
+ assert ret
+
+ def test_validate_object_empty(self):
+ ret = utils.validate_object({})
+ assert ret == False
+
+ def test_validate_object_missing_keys(self):
+ ret = utils.validate_object({ 'foo': 'bar' })
+ assert ret == False
+
+ def test_validate_object_bad_type(self):
+ md = { utils.X_TIMESTAMP: 'na',
+ utils.X_CONTENT_TYPE: 'na',
+ utils.X_ETAG: 'bad',
+ utils.X_CONTENT_LENGTH: 'na',
+ utils.X_TYPE: 'bad',
+ utils.X_OBJECT_TYPE: 'na' }
+ ret = utils.validate_object(md)
+ assert ret == False
+
+ def test_validate_object_good_type(self):
+ md = { utils.X_TIMESTAMP: 'na',
+ utils.X_CONTENT_TYPE: 'na',
+ utils.X_ETAG: 'bad',
+ utils.X_CONTENT_LENGTH: 'na',
+ utils.X_TYPE: utils.OBJECT,
+ utils.X_OBJECT_TYPE: 'na' }
+ ret = utils.validate_object(md)
+ assert ret
+
+ def test_is_marker_empty(self):
+ assert False == utils.is_marker(None)
+
+ def test_is_marker_missing(self):
+ assert False == utils.is_marker( { 'foo': 'bar' } )
+
+ def test_is_marker_not_marker(self):
+ md = { utils.X_OBJECT_TYPE: utils.DIR }
+ assert False == utils.is_marker(md)
+
+ def test_is_marker(self):
+ md = { utils.X_OBJECT_TYPE: utils.MARKER_DIR }
+ assert utils.is_marker(md)
diff --git a/ufo/tools/test-requires b/ufo/tools/test-requires
new file mode 100644
index 0000000..ef8bf14
--- /dev/null
+++ b/ufo/tools/test-requires
@@ -0,0 +1,6 @@
+coverage
+nose
+nosexcover
+openstack.nose_plugin
+nosehtmloutput
+mock>=0.8.0
diff --git a/ufo/tox.ini b/ufo/tox.ini
new file mode 100644
index 0000000..792bb41
--- /dev/null
+++ b/ufo/tox.ini
@@ -0,0 +1,25 @@
+[tox]
+envlist = py26,py27
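+# Run "tox" from the ufo/ directory to execute the unit tests in each env.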
+
+[testenv]
+setenv = VIRTUAL_ENV={envdir}
+ NOSE_WITH_OPENSTACK=1
+ NOSE_OPENSTACK_COLOR=1
+ NOSE_OPENSTACK_RED=0.05
+ NOSE_OPENSTACK_YELLOW=0.025
+ NOSE_OPENSTACK_SHOW_ELAPSED=1
+ NOSE_OPENSTACK_STDOUT=1
+deps =
+ https://launchpad.net/swift/grizzly/1.8.0/+download/swift-1.8.0.tar.gz
+ -r{toxinidir}/tools/test-requires
+changedir = {toxinidir}/test/unit
+commands = nosetests -v --exe --with-coverage --cover-package gluster --cover-erase {posargs}
+
+[tox:jenkins]
+downloadcache = ~/cache/pip
+
+[testenv:cover]
+setenv = NOSE_WITH_COVERAGE=1
+
+[testenv:venv]
+commands = {posargs}
diff --git a/ufo/unittests.sh b/ufo/unittests.sh
new file mode 100755
index 0000000..a02be77
--- /dev/null
+++ b/ufo/unittests.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
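+# Run the unit tests with coverage; extra arguments are passed to nosetests.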
+cd "$(dirname "$0")"/test/unit
+nosetests --exe --with-coverage --cover-package gluster --cover-erase "$@"
+saved_status=$?
+rm -f .coverage
+exit $saved_status