summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-x.functests6
-rw-r--r--gluster/swift/common/DiskDir.py90
-rw-r--r--gluster/swift/common/Glusterfs.py2
-rw-r--r--gluster/swift/common/middleware/gswauth/swauth/middleware.py1
-rw-r--r--gluster/swift/common/utils.py41
-rw-r--r--gluster/swift/obj/diskfile.py137
-rw-r--r--gluster/swift/obj/server.py155
-rw-r--r--gluster/swift/proxy/server.py2
-rw-r--r--requirements.txt9
-rw-r--r--test-requirements.txt12
-rw-r--r--test/__init__.py4
-rw-r--r--test/functional/__init__.py959
-rw-r--r--test/functional/gluster_swift_tests.py3
-rw-r--r--test/functional/swift_test_client.py246
-rwxr-xr-xtest/functional/test_account.py123
-rwxr-xr-xtest/functional/test_container.py549
-rwxr-xr-xtest/functional/test_object.py353
-rw-r--r--test/functional/tests.py1331
-rw-r--r--test/functional_auth/common_conf/object-server.conf2
-rw-r--r--test/unit/__init__.py602
-rw-r--r--test/unit/obj/test_diskfile.py138
-rw-r--r--test/unit/obj/test_expirer.py385
-rw-r--r--test/unit/proxy/controllers/test_account.py16
-rw-r--r--test/unit/proxy/controllers/test_base.py463
-rw-r--r--test/unit/proxy/controllers/test_container.py93
-rwxr-xr-xtest/unit/proxy/controllers/test_obj.py1412
-rw-r--r--test/unit/proxy/test_server.py3991
-rw-r--r--tox.ini26
28 files changed, 9327 insertions, 1824 deletions
diff --git a/.functests b/.functests
index 6b13b69..c129712 100755
--- a/.functests
+++ b/.functests
@@ -26,10 +26,4 @@ bash tools/tempauth_functional_tests.sh || EXIT_STATUS=$?
# Run functional tests with gswauth as auth middleware
bash tools/gswauth_functional_tests.sh || EXIT_STATUS=$?
-# Run object expirer functional tests if gsexpiring volume is found.
-if mount | grep "gsexpiring on /mnt/gluster-object/gsexpiring type fuse.glusterfs"; then
- echo "Running object expirer functional tests"
- bash tools/object_expirer_functional.sh || EXIT_STATUS=$?
-fi
-
exit $EXIT_STATUS
diff --git a/gluster/swift/common/DiskDir.py b/gluster/swift/common/DiskDir.py
index 6112709..e8dba35 100644
--- a/gluster/swift/common/DiskDir.py
+++ b/gluster/swift/common/DiskDir.py
@@ -255,7 +255,7 @@ class DiskDir(DiskCommon):
:param uid: user ID container object should assume
:param gid: group ID container object should assume
- Usage pattern from container/server.py (Havana, 1.8.0+):
+ Usage pattern from container/server.py (Kilo, 2.3.0):
DELETE:
if auto-create and obj and not .db_file:
# Creates container
@@ -287,36 +287,43 @@ class DiskDir(DiskCommon):
return 404
.put_object()
else:
- if not .db_file:
- # Creates container
- .initialize()
- else:
- # Update container timestamp
- .is_deleted()
+ _update_or_create():
+ if not .db_file:
+ # Creates container
+ .initialize()
+ recreated = .is_deleted():
+ if recreated:
+ .set_storage_policy_index()
+ .storage_policy_index
.update_put_timestamp()
if .is_deleted()
return conflict
- if metadata:
+ if recreated:
+ .update_status_changed_at()
+
+ if 'X-Container-Sync-To' in metadata:
if .metadata
.set_x_container_sync_points()
.update_metadata()
account_update():
.get_info()
HEAD:
- .pending_timeout
- .stale_reads_ok
- if .is_deleted():
- return 404
- .get_info()
+ info, is_deleted = .get_info_is_deleted()
+ .get_info_is_deleted():
+ if not .db_file:
+ return {}, True
+ info = .get_info()
+ return info, ._is_deleted_info()
.metadata
GET:
- .pending_timeout
- .stale_reads_ok
- if .is_deleted():
- return 404
- .get_info()
- .metadata
+ info, is_deleted = .get_info_is_deleted()
+ .get_info_is_deleted():
+ if not .db_file:
+ return {}, True
+ info = .get_info()
+ return info, ._is_deleted_info()
.list_objects_iter()
+ .metadata
POST:
if .is_deleted():
return 404
@@ -346,8 +353,22 @@ class DiskDir(DiskCommon):
create_container_metadata(self.datadir)
self.metadata = _read_metadata(self.datadir)
+ def update_status_changed_at(self, timestamp):
+ return
+
+ @property
+ def storage_policy_index(self):
+ if not hasattr(self, '_storage_policy_index'):
+ self._storage_policy_index = \
+ self.get_info()['storage_policy_index']
+ return self._storage_policy_index
+
+ def set_storage_policy_index(self, policy_index, timestamp=None):
+ self._storage_policy_index = policy_index
+
def list_objects_iter(self, limit, marker, end_marker,
- prefix, delimiter, path=None):
+ prefix, delimiter, path=None,
+ storage_policy_index=0):
"""
Returns tuple of name, created_at, size, content_type, etag.
"""
@@ -451,6 +472,12 @@ class DiskDir(DiskCommon):
return objects
+ def get_info_is_deleted(self):
+ if not do_exists(self.datadir):
+ return {}, True
+ info = self.get_info()
+ return info, False
+
def get_info(self):
"""
Get global data for the container.
@@ -477,7 +504,10 @@ class DiskDir(DiskCommon):
'x_container_sync_point1', -1),
'x_container_sync_point2': self.metadata.get(
'x_container_sync_point2', -1),
+ 'storage_policy_index': self.metadata.get(
+ 'storage_policy_index', 0)
}
+ self._storage_policy_index = data['storage_policy_index']
return data
def put_object(self, name, timestamp, size, content_type, etag, deleted=0):
@@ -540,13 +570,14 @@ class DiskDir(DiskCommon):
class DiskAccount(DiskCommon):
"""
- Usage pattern from account/server.py (Havana, 1.8.0+):
+ Usage pattern from account/server.py (Kilo, 2.3.0):
DELETE:
.is_deleted()
+ .is_status_deleted()
.delete_db()
+ .is_status_deleted()
PUT:
container:
- .pending_timeout
.db_file
.initialize()
.is_deleted()
@@ -555,25 +586,27 @@ class DiskAccount(DiskCommon):
.db_file
.initialize()
.is_status_deleted()
+ .is_status_deleted()
.is_deleted()
.update_put_timestamp()
- .is_deleted() ???
+ .is_deleted()
.update_metadata()
HEAD:
- .pending_timeout
- .stale_reads_ok
.is_deleted()
+ .is_status_deleted()
.get_info()
+ .get_policy_stats()
.metadata
GET:
- .pending_timeout
- .stale_reads_ok
.is_deleted()
+ .is_status_deleted()
.get_info()
+ .get_policy_stats()
.metadata
.list_containers_iter()
POST:
.is_deleted()
+ .is_status_deleted()
.update_metadata()
"""
@@ -748,3 +781,6 @@ class DiskAccount(DiskCommon):
'bytes_used': self.metadata.get(X_BYTES_USED, (0, 0))[0],
'hash': '', 'id': ''}
return data
+
+ def get_policy_stats(self, do_migrations=False):
+ return {}
diff --git a/gluster/swift/common/Glusterfs.py b/gluster/swift/common/Glusterfs.py
index 6a2fdb2..910f752 100644
--- a/gluster/swift/common/Glusterfs.py
+++ b/gluster/swift/common/Glusterfs.py
@@ -148,7 +148,7 @@ def _get_unique_id():
# own the lock.
continue
raise
- except:
+ except Exception:
os.close(fd)
raise
else:
diff --git a/gluster/swift/common/middleware/gswauth/swauth/middleware.py b/gluster/swift/common/middleware/gswauth/swauth/middleware.py
index cdcc638..3cd9cf7 100644
--- a/gluster/swift/common/middleware/gswauth/swauth/middleware.py
+++ b/gluster/swift/common/middleware/gswauth/swauth/middleware.py
@@ -28,6 +28,7 @@ import base64
from eventlet.timeout import Timeout
from eventlet import TimeoutError
+from swift import gettext_ as _
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPConflict, \
HTTPCreated, HTTPForbidden, HTTPMethodNotAllowed, HTTPMovedPermanently, \
HTTPNoContent, HTTPNotFound, HTTPUnauthorized, \
diff --git a/gluster/swift/common/utils.py b/gluster/swift/common/utils.py
index b6a5a09..e6f4bcc 100644
--- a/gluster/swift/common/utils.py
+++ b/gluster/swift/common/utils.py
@@ -17,6 +17,7 @@ import os
import stat
import json
import errno
+import random
import logging
from hashlib import md5
from eventlet import sleep
@@ -29,7 +30,7 @@ from swift.common.db import utf8encodekeys
from gluster.swift.common.fs_utils import do_getctime, do_getmtime, do_stat, \
do_listdir, do_walk, do_rmdir, do_log_rl, get_filename_from_fd, do_open, \
do_isdir, do_getsize, do_getxattr, do_setxattr, do_removexattr, do_read, \
- do_close, do_dup, do_lseek, do_fstat
+ do_close, do_dup, do_lseek, do_fstat, do_fsync, do_rename
from gluster.swift.common import Glusterfs
X_CONTENT_TYPE = 'Content-Type'
@@ -607,3 +608,41 @@ def rmobjdir(dir_path):
raise
else:
return True
+
+
+def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
+ """
+ Ensure that a pickle file gets written to disk. The file is first written
+ to a tmp file location in the destination directory path, ensured it is
+ synced to disk, then moved to its final destination name.
+
+ This version takes advantage of Gluster's dot-prefix-dot-suffix naming
+ where a file named ".thefile.name.9a7aasv" is hashed to the same
+ Gluster node as "thefile.name". This ensures the renaming of a temp file
+ once written does not move it to another Gluster node.
+
+ :param obj: python object to be pickled
+ :param dest: path of final destination file
+ :param tmp: path to tmp to use, defaults to None (ignored)
+ :param pickle_protocol: protocol to pickle the obj with, defaults to 0
+ """
+ dirname = os.path.dirname(dest)
+ # Create destination directory
+ try:
+ os.makedirs(dirname)
+ except OSError as err:
+ if err.errno != errno.EEXIST:
+ raise
+ basename = os.path.basename(dest)
+ tmpname = '.' + basename + '.' + \
+ md5(basename + str(random.random())).hexdigest()
+ tmppath = os.path.join(dirname, tmpname)
+ with open(tmppath, 'wb') as fo:
+ pickle.dump(obj, fo, pickle_protocol)
+ # TODO: This flush() method call turns into a flush() system call
+ # We'll need to wrap this as well, but we would do this by writing
+ # a context manager for our own open() method which returns an object
+ # in fo which makes the gluster API call.
+ fo.flush()
+ do_fsync(fo)
+ do_rename(tmppath, dest)
diff --git a/gluster/swift/obj/diskfile.py b/gluster/swift/obj/diskfile.py
index eb180a2..21e6cee 100644
--- a/gluster/swift/obj/diskfile.py
+++ b/gluster/swift/obj/diskfile.py
@@ -23,22 +23,18 @@ except ImportError:
import random
import logging
import time
-from collections import defaultdict
-from socket import gethostname
-from hashlib import md5
+from uuid import uuid4
from eventlet import sleep
-from greenlet import getcurrent
from contextlib import contextmanager
from gluster.swift.common.exceptions import AlreadyExistsAsFile, \
AlreadyExistsAsDir
-from swift.common.utils import TRUE_VALUES, ThreadPool, config_true_value
+from swift.common.utils import ThreadPool
from swift.common.exceptions import DiskFileNotExist, DiskFileError, \
DiskFileNoSpace, DiskFileDeviceUnavailable, DiskFileNotOpen, \
DiskFileExpired
from swift.common.swob import multi_range_iterator
from gluster.swift.common.exceptions import GlusterFileSystemOSError
-from gluster.swift.common.Glusterfs import mount
from gluster.swift.common.fs_utils import do_fstat, do_open, do_close, \
do_unlink, do_chown, do_fsync, do_fchown, do_stat, do_write, do_read, \
do_fadvise64, do_rename, do_fdatasync, do_lseek, do_mkdir
@@ -49,24 +45,15 @@ from gluster.swift.common.utils import X_CONTENT_TYPE, \
X_TIMESTAMP, X_TYPE, X_OBJECT_TYPE, FILE, OBJECT, DIR_TYPE, \
FILE_TYPE, DEFAULT_UID, DEFAULT_GID, DIR_NON_OBJECT, DIR_OBJECT, \
X_ETAG, X_CONTENT_LENGTH
-from ConfigParser import ConfigParser, NoSectionError, NoOptionError
+from swift.obj.diskfile import DiskFileManager as SwiftDiskFileManager
# FIXME: Hopefully we'll be able to move to Python 2.7+ where O_CLOEXEC will
# be back ported. See http://www.python.org/dev/peps/pep-0433/
O_CLOEXEC = 02000000
-DEFAULT_DISK_CHUNK_SIZE = 65536
-DEFAULT_KEEP_CACHE_SIZE = (5 * 1024 * 1024)
-DEFAULT_MB_PER_SYNC = 512
-# keep these lower-case
-DISALLOWED_HEADERS = set('content-length content-type deleted etag'.split())
-
MAX_RENAME_ATTEMPTS = 10
MAX_OPEN_ATTEMPTS = 10
-_cur_pid = str(os.getpid())
-_cur_host = str(gethostname())
-
def _random_sleep():
sleep(random.uniform(0.5, 0.15))
@@ -181,24 +168,6 @@ def make_directory(full_path, uid, gid, metadata=None):
return True, metadata
-_fs_conf = ConfigParser()
-if _fs_conf.read(os.path.join('/etc/swift', 'fs.conf')):
- try:
- _mkdir_locking = _fs_conf.get('DEFAULT', 'mkdir_locking', "no") \
- in TRUE_VALUES
- logging.warn("The option mkdir_locking has been deprecated and is"
- " no longer supported")
- except (NoSectionError, NoOptionError):
- pass
- try:
- _use_put_mount = _fs_conf.get('DEFAULT', 'use_put_mount', "no") \
- in TRUE_VALUES
- except (NoSectionError, NoOptionError):
- _use_put_mount = False
-else:
- _use_put_mount = False
-
-
def _adjust_metadata(metadata):
# Fix up the metadata to ensure it has a proper value for the
# Content-Type metadata, as well as an X_TYPE and X_OBJECT_TYPE
@@ -223,7 +192,7 @@ def _adjust_metadata(metadata):
return metadata
-class OnDiskManager(object):
+class DiskFileManager(SwiftDiskFileManager):
"""
Management class for devices, providing common place for shared parameters
and methods not provided by the DiskFile class (which primarily services
@@ -240,42 +209,14 @@ class OnDiskManager(object):
:param conf: caller provided configuration object
:param logger: caller provided logger
"""
- def __init__(self, conf, logger):
- self.logger = logger
- self.disk_chunk_size = int(conf.get('disk_chunk_size',
- DEFAULT_DISK_CHUNK_SIZE))
- self.keep_cache_size = int(conf.get('keep_cache_size',
- DEFAULT_KEEP_CACHE_SIZE))
- self.bytes_per_sync = int(conf.get('mb_per_sync',
- DEFAULT_MB_PER_SYNC)) * 1024 * 1024
- self.devices = conf.get('devices', '/srv/node/')
- self.mount_check = config_true_value(conf.get('mount_check', 'true'))
- threads_per_disk = int(conf.get('threads_per_disk', '0'))
- self.threadpools = defaultdict(
- lambda: ThreadPool(nthreads=threads_per_disk))
-
- def _get_dev_path(self, device):
- """
- Return the path to a device, checking to see that it is a proper mount
- point based on a configuration parameter.
-
- :param device: name of target device
- :returns: full path to the device, None if the path to the device is
- not a proper mount point.
- """
- if self.mount_check and not mount(self.devices, device):
- dev_path = None
- else:
- dev_path = os.path.join(self.devices, device)
- return dev_path
-
- def get_diskfile(self, device, account, container, obj,
- **kwargs):
- dev_path = self._get_dev_path(device)
+ def get_diskfile(self, device, partition, account, container, obj,
+ policy=None, **kwargs):
+ dev_path = self.get_dev_path(device, self.mount_check)
if not dev_path:
raise DiskFileDeviceUnavailable()
return DiskFile(self, dev_path, self.threadpools[device],
- account, container, obj, **kwargs)
+ partition, account, container, obj,
+ policy=policy, **kwargs)
class DiskFileWriter(object):
@@ -447,10 +388,20 @@ class DiskFileWriter(object):
df._threadpool.force_run_in_thread(self._finalize_put, metadata)
- # Avoid the unlink() system call as part of the DiskFile.create()
- # context cleanup
+ # Avoid the unlink() system call as part of the mkstemp context
+ # cleanup
self._tmppath = None
+ def commit(self, timestamp):
+ """
+ Perform any operations necessary to mark the object as durable. For
+ replication policy type this is a no-op.
+
+ :param timestamp: object put timestamp, an instance of
+ :class:`~swift.common.utils.Timestamp`
+ """
+ pass
+
class DiskFileReader(object):
"""
@@ -579,9 +530,9 @@ class DiskFile(object):
Manage object files on disk.
Object names ending or beginning with a '/' as in /a, a/, /a/b/,
- etc, or object names with multiple consecutive slahes, like a//b,
- are not supported. The proxy server's contraints filter
- gluster.common.constrains.gluster_check_object_creation() should
+ etc, or object names with multiple consecutive slashes, like a//b,
+ are not supported. The proxy server's constraints filter
+ gluster.common.constrains.check_object_creation() should
reject such requests.
:param mgr: associated on-disk manager instance
@@ -593,36 +544,37 @@ class DiskFile(object):
:param uid: user ID disk object should assume (file or directory)
:param gid: group ID disk object should assume (file or directory)
"""
- def __init__(self, mgr, dev_path, threadpool, account, container, obj,
- uid=DEFAULT_UID, gid=DEFAULT_GID):
+ def __init__(self, mgr, dev_path, threadpool, partition,
+ account=None, container=None, obj=None,
+ policy=None, uid=DEFAULT_UID, gid=DEFAULT_GID):
+ # Variables partition and policy are currently unused.
self._mgr = mgr
self._device_path = dev_path
self._threadpool = threadpool or ThreadPool(nthreads=0)
self._uid = int(uid)
self._gid = int(gid)
self._is_dir = False
- self._logger = mgr.logger
self._metadata = None
self._fd = None
self._stat = None
# Don't store a value for data_file until we know it exists.
self._data_file = None
- self._container_path = os.path.join(self._device_path, container)
+ self._account = account # Unused, account = volume
+ self._container = container
+
+ self._container_path = os.path.join(self._device_path, self._container)
+
obj = obj.strip(os.path.sep)
obj_path, self._obj = os.path.split(obj)
if obj_path:
self._obj_path = obj_path.strip(os.path.sep)
- self._datadir = os.path.join(self._container_path, self._obj_path)
+ self._put_datadir = os.path.join(self._container_path,
+ self._obj_path)
else:
self._obj_path = ''
- self._datadir = self._container_path
+ self._put_datadir = self._container_path
- if _use_put_mount:
- self._put_datadir = os.path.join(
- self._device_path + '_PUT', container, self._obj_path)
- else:
- self._put_datadir = self._datadir
self._data_file = os.path.join(self._put_datadir, self._obj)
def open(self):
@@ -687,7 +639,8 @@ class DiskFile(object):
raise DiskFileNotExist
else:
# Re-raise the original exception after fd has been closed
- raise err
+ raise
+
return self
def _is_object_expired(self, metadata):
@@ -874,16 +827,14 @@ class DiskFile(object):
:raises AlreadyExistsAsFile: if path or part of a path is not a \
directory
"""
+
data_file = os.path.join(self._put_datadir, self._obj)
# Assume the full directory path exists to the file already, and
# construct the proper name for the temporary file.
attempts = 1
- cur_thread = str(getcurrent())
while True:
- postfix = md5(self._obj + _cur_host + _cur_pid + cur_thread
- + str(random.random())).hexdigest()
- tmpfile = '.' + self._obj + '.' + postfix
+ tmpfile = '.' + self._obj + '.' + uuid4().hex
tmppath = os.path.join(self._put_datadir, tmpfile)
try:
fd = do_open(tmppath,
@@ -905,7 +856,7 @@ class DiskFile(object):
if attempts >= MAX_OPEN_ATTEMPTS:
# We failed after N attempts to create the temporary
# file.
- raise DiskFileError('DiskFile.create(): failed to'
+ raise DiskFileError('DiskFile.mkstemp(): failed to'
' successfully create a temporary file'
' without running into a name conflict'
' after %d of %d attempts for: %s' % (
@@ -918,7 +869,7 @@ class DiskFile(object):
# FIXME: Possible FUSE issue or race condition, let's
# sleep on it and retry the operation.
_random_sleep()
- logging.warn("DiskFile.create(): %s ... retrying in"
+ logging.warn("DiskFile.mkstemp(): %s ... retrying in"
" 0.1 secs", gerr)
attempts += 1
elif not self._obj_path:
@@ -927,7 +878,7 @@ class DiskFile(object):
# could be a FUSE issue or some race condition, so let's
# sleep a bit and retry.
_random_sleep()
- logging.warn("DiskFile.create(): %s ... retrying in"
+ logging.warn("DiskFile.mkstemp(): %s ... retrying in"
" 0.1 secs", gerr)
attempts += 1
elif attempts > 1:
@@ -935,7 +886,7 @@ class DiskFile(object):
# also be a FUSE issue or some race condition, nap and
# retry.
_random_sleep()
- logging.warn("DiskFile.create(): %s ... retrying in"
+ logging.warn("DiskFile.mkstemp(): %s ... retrying in"
" 0.1 secs" % gerr)
attempts += 1
else:
diff --git a/gluster/swift/obj/server.py b/gluster/swift/obj/server.py
index 8bc080a..1d8d418 100644
--- a/gluster/swift/obj/server.py
+++ b/gluster/swift/obj/server.py
@@ -15,26 +15,27 @@
""" Object Server for Gluster for Swift """
-# Simply importing this monkey patches the constraint handling to fit our
-# needs
-import gluster.swift.common.constraints # noqa
-from swift.common.swob import HTTPConflict
-from swift.common.utils import public, timing_stats
+from swift.common.swob import HTTPConflict, HTTPNotImplemented
+from swift.common.utils import public, timing_stats, replication
from gluster.swift.common.exceptions import AlreadyExistsAsFile, \
AlreadyExistsAsDir
from swift.common.request_helpers import split_and_validate_path
from swift.obj import server
-from gluster.swift.obj.diskfile import OnDiskManager
+from gluster.swift.obj.diskfile import DiskFileManager
-import os
-from swift.common.exceptions import ConnectionTimeout
-from swift.common.bufferedhttp import http_connect
-from eventlet import Timeout
-from swift.common.http import is_success
-from gluster.swift.common.ring import Ring
-from swift import gettext_ as _
+
+class GlusterSwiftDiskFileRouter(object):
+ """
+ Replacement for Swift's DiskFileRouter object.
+ Always returns GlusterSwift's DiskFileManager implementation.
+ """
+ def __init__(self, *args, **kwargs):
+ self.manager_cls = DiskFileManager(*args, **kwargs)
+
+ def __getitem__(self, policy):
+ return self.manager_cls
class ObjectController(server.ObjectController):
@@ -52,23 +53,8 @@ class ObjectController(server.ObjectController):
:param conf: WSGI configuration parameter
"""
- # Common on-disk hierarchy shared across account, container and object
- # servers.
- self._ondisk_mgr = OnDiskManager(conf, self.logger)
- self.swift_dir = conf.get('swift_dir', '/etc/swift')
-
- def get_diskfile(self, device, partition, account, container, obj,
- **kwargs):
- """
- Utility method for instantiating a DiskFile object supporting a given
- REST API.
-
- An implementation of the object server that wants to use a different
- DiskFile class would simply over-ride this method to provide that
- behavior.
- """
- return self._ondisk_mgr.get_diskfile(device, account, container, obj,
- **kwargs)
+ # Replaces Swift's DiskFileRouter object reference with ours.
+ self._diskfile_router = GlusterSwiftDiskFileRouter(conf, self.logger)
def container_update(self, *args, **kwargs):
"""
@@ -79,102 +65,47 @@ class ObjectController(server.ObjectController):
"""
return
- def get_object_ring(self):
- if hasattr(self, 'object_ring'):
- if not self.object_ring:
- self.object_ring = Ring(self.swift_dir, ring_name='object')
- else:
- self.object_ring = Ring(self.swift_dir, ring_name='object')
- return self.object_ring
-
- def async_update(self, op, account, container, obj, host, partition,
- contdevice, headers_out, objdevice):
- """
- In Openstack Swift, this method is called by:
- * container_update (a no-op in gluster-swift)
- * delete_at_update (to PUT objects into .expiring_objects account)
-
- The Swift's version of async_update only sends the request to
- container-server to PUT the object. The container-server calls
- container_update method which makes an entry for the object in it's
- database. No actual object is created on disk.
-
- But in gluster-swift container_update is a no-op, so we'll
- have to PUT an actual object. We override async_update to create a
- container first and then the corresponding "tracker object" which
- tracks expired objects scheduled for deletion.
+ def delete_at_update(self, *args, **kwargs):
"""
+ Update the expiring objects container when objects are updated.
- headers_out['user-agent'] = 'obj-server %s' % os.getpid()
- if all([host, partition, contdevice]):
- # PUT the container. Send request directly to container-server
- container_path = '/%s/%s' % (account, container)
- try:
- with ConnectionTimeout(self.conn_timeout):
- ip, port = host.rsplit(':', 1)
- conn = http_connect(ip, port, contdevice, partition, op,
- container_path, headers_out)
- with Timeout(self.node_timeout):
- response = conn.getresponse()
- response.read()
- if not is_success(response.status):
- self.logger.error(_(
- 'async_update : '
- 'ERROR Container update failed :%(status)d '
- 'response from %(ip)s:%(port)s/%(dev)s'),
- {'status': response.status, 'ip': ip, 'port': port,
- 'dev': contdevice})
- return
- except (Exception, Timeout):
- self.logger.exception(_(
- 'async_update : '
- 'ERROR Container update failed :%(ip)s:%(port)s/%(dev)s'),
- {'ip': ip, 'port': port, 'dev': contdevice})
-
- # PUT the tracker object. Send request directly to object-server
- object_path = '/%s/%s/%s' % (account, container, obj)
- headers_out['Content-Length'] = 0
- headers_out['Content-Type'] = 'text/plain'
- try:
- with ConnectionTimeout(self.conn_timeout):
- # FIXME: Assuming that get_nodes returns single node
- part, nodes = self.get_object_ring().get_nodes(account,
- container,
- obj)
- ip = nodes[0]['ip']
- port = nodes[0]['port']
- objdevice = nodes[0]['device']
- conn = http_connect(ip, port, objdevice, partition, op,
- object_path, headers_out)
- with Timeout(self.node_timeout):
- response = conn.getresponse()
- response.read()
- if is_success(response.status):
- return
- else:
- self.logger.error(_(
- 'async_update : '
- 'ERROR Object PUT failed : %(status)d '
- 'response from %(ip)s:%(port)s/%(dev)s'),
- {'status': response.status, 'ip': ip, 'port': port,
- 'dev': objdevice})
- except (Exception, Timeout):
- self.logger.exception(_(
- 'async_update : '
- 'ERROR Object PUT failed :%(ip)s:%(port)s/%(dev)s'),
- {'ip': ip, 'port': port, 'dev': objdevice})
+ For Gluster, this is a no-op as there are no container DB entries
+ to be created that tracks objects to be expired. Objects to be
+ expired will be determined by crawling the filesystem directly.
+ """
return
@public
@timing_stats()
def PUT(self, request):
try:
+ # now call swift's PUT method
return server.ObjectController.PUT(self, request)
except (AlreadyExistsAsFile, AlreadyExistsAsDir):
device = \
split_and_validate_path(request, 1, 5, True)
return HTTPConflict(drive=device, request=request)
+ @public
+ @replication
+ @timing_stats(sample_rate=0.1)
+ def REPLICATE(self, request):
+ """
+ In Swift, this method handles REPLICATE requests for the Swift
+ Object Server. This is used by the object replicator to get hashes
+ for directories.
+
+ Gluster-Swift does not support this as it expects the underlying
+ GlusterFS to take care of replication
+ """
+ return HTTPNotImplemented(request=request)
+
+ @public
+ @replication
+ @timing_stats(sample_rate=0.1)
+ def REPLICATION(self, request):
+ return HTTPNotImplemented(request=request)
+
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI object server apps"""
diff --git a/gluster/swift/proxy/server.py b/gluster/swift/proxy/server.py
index 7b2f58e..1621990 100644
--- a/gluster/swift/proxy/server.py
+++ b/gluster/swift/proxy/server.py
@@ -20,7 +20,7 @@ import gluster.swift.common.constraints # noqa
from swift.proxy.server import Application, mimetypes # noqa
from swift.proxy.controllers import AccountController # noqa
-from swift.proxy.controllers import ObjectController # noqa
+from swift.proxy.controllers import ObjectControllerRouter # noqa
from swift.proxy.controllers import ContainerController # noqa
diff --git a/requirements.txt b/requirements.txt
index bbac51a..a50eb9d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,12 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
dnspython>=1.9.4
-eventlet>=0.9.15
+eventlet>=0.16.1,!=0.17.0
greenlet>=0.3.1
-netifaces>=0.5
+netifaces>=0.5,!=0.10.0,!=0.10.1
pastedeploy>=1.3.3
simplejson>=2.0.9
xattr>=0.4
+PyECLib==1.0.7 # BSD
diff --git a/test-requirements.txt b/test-requirements.txt
index 63d499e..88ca936 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,12 +1,16 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
# Hacking already pins down pep8, pyflakes and flake8
-hacking>=0.5.6,<0.6
+hacking>=0.8.0,<0.9
coverage
nose
nosexcover
openstack.nose_plugin
nosehtmloutput
+oslosphinx
sphinx>=1.1.2,<1.2
-mock>=0.8.0
+mock>=1.0
python-swiftclient
-python-keystoneclient
-prettytable
+prettytable # SOF
diff --git a/test/__init__.py b/test/__init__.py
index 7eb5f47..3bd25b1 100644
--- a/test/__init__.py
+++ b/test/__init__.py
@@ -53,12 +53,12 @@ def get_config(section_name=None, defaults=None):
:param section_name: the section to read (all sections if not defined)
:param defaults: an optional dictionary namespace of defaults
"""
- config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
- '/etc/swift/test.conf')
config = {}
if defaults is not None:
config.update(defaults)
+ config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
+ '/etc/swift/test.conf')
try:
config = readconf(config_file, section_name)
except SystemExit:
diff --git a/test/functional/__init__.py b/test/functional/__init__.py
index e69de29..580de56 100644
--- a/test/functional/__init__.py
+++ b/test/functional/__init__.py
@@ -0,0 +1,959 @@
+# Copyright (c) 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import httplib
+import mock
+import os
+import sys
+import pickle
+import socket
+import locale
+import eventlet
+import eventlet.debug
+import functools
+import random
+from ConfigParser import ConfigParser, NoSectionError
+from time import time, sleep
+from httplib import HTTPException
+from urlparse import urlparse
+from nose import SkipTest
+from contextlib import closing
+from gzip import GzipFile
+from shutil import rmtree
+from tempfile import mkdtemp
+from swift.common.middleware.memcache import MemcacheMiddleware
+from swift.common.storage_policy import parse_storage_policies, PolicyError
+
+from test import get_config
+from test.functional.swift_test_client import Account, Connection, \
+ ResponseError
+# This has the side effect of mocking out the xattr module so that unit tests
+# (and in this case, when in-process functional tests are called for) can run
+# on file systems that don't support extended attributes.
+from test.unit import debug_logger, FakeMemcache
+
+from swift.common import constraints, utils, ring, storage_policy
+from swift.common.ring import Ring
+from swift.common.wsgi import monkey_patch_mimetools, loadapp
+from swift.common.utils import config_true_value
+from swift.account import server as account_server
+from swift.container import server as container_server
+from swift.obj import server as object_server, mem_server as mem_object_server
+import swift.proxy.controllers.obj
+
+httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT
+DEBUG = True
+
+# In order to get the proper blocking behavior of sockets without using
+# threads, where we can set an arbitrary timeout for some piece of code under
+# test, we use eventlet with the standard socket library patched. We have to
+# perform this setup at module import time, since all the socket module
+# bindings in the swiftclient code will have been made by the time nose
+# invokes the package or class setup methods.
+eventlet.hubs.use_hub(utils.get_hub())
+eventlet.patcher.monkey_patch(all=False, socket=True)
+eventlet.debug.hub_exceptions(False)
+
+from swiftclient import get_auth, http_connection
+
+has_insecure = False
+try:
+ from swiftclient import __version__ as client_version
+ # Prevent a ValueError in StrictVersion with '2.0.3.68.ga99c2ff'
+ client_version = '.'.join(client_version.split('.')[:3])
+except ImportError:
+ # Pre-PBR we had version, not __version__. Anyhow...
+ client_version = '1.2'
+from distutils.version import StrictVersion
+if StrictVersion(client_version) >= StrictVersion('2.0'):
+ has_insecure = True
+
+
+config = {}
+web_front_end = None
+normalized_urls = None
+
+# If no config was read, we will fall back to old school env vars
+swift_test_auth_version = None
+swift_test_auth = os.environ.get('SWIFT_TEST_AUTH')
+swift_test_user = [os.environ.get('SWIFT_TEST_USER'), None, None, '', '']
+swift_test_key = [os.environ.get('SWIFT_TEST_KEY'), None, None, '', '']
+swift_test_tenant = ['', '', '', '', '']
+swift_test_perm = ['', '', '', '', '']
+swift_test_domain = ['', '', '', '', '']
+swift_test_user_id = ['', '', '', '', '']
+swift_test_tenant_id = ['', '', '', '', '']
+
+skip, skip2, skip3, skip_service_tokens = False, False, False, False
+
+orig_collate = ''
+insecure = False
+
+orig_hash_path_suff_pref = ('', '')
+orig_swift_conf_name = None
+
+in_process = False
+_testdir = _test_servers = _test_coros = None
+
+
+class FakeMemcacheMiddleware(MemcacheMiddleware):
+ """
+ Caching middleware that fakes out caching in swift if memcached
+ does not appear to be running.
+ """
+
+ def __init__(self, app, conf):
+ super(FakeMemcacheMiddleware, self).__init__(app, conf)
+ self.memcache = FakeMemcache()
+
+
+class InProcessException(BaseException):
+ pass
+
+
+def _info(msg):
+ print >> sys.stderr, msg
+
+
+def _debug(msg):
+ if DEBUG:
+ _info('DEBUG: ' + msg)
+
+
+def _in_process_setup_swift_conf(swift_conf_src, testdir):
+ # override swift.conf contents for in-process functional test runs
+ conf = ConfigParser()
+ conf.read(swift_conf_src)
+ try:
+ section = 'swift-hash'
+ conf.set(section, 'swift_hash_path_suffix', 'inprocfunctests')
+ conf.set(section, 'swift_hash_path_prefix', 'inprocfunctests')
+ section = 'swift-constraints'
+ max_file_size = (8 * 1024 * 1024) + 2 # 8 MB + 2
+ conf.set(section, 'max_file_size', max_file_size)
+ except NoSectionError:
+ msg = 'Conf file %s is missing section %s' % (swift_conf_src, section)
+ raise InProcessException(msg)
+
+ test_conf_file = os.path.join(testdir, 'swift.conf')
+ with open(test_conf_file, 'w') as fp:
+ conf.write(fp)
+
+ return test_conf_file
+
+
+def _in_process_find_conf_file(conf_src_dir, conf_file_name, use_sample=True):
+ """
+    Look for a file first in conf_src_dir if it exists; otherwise optionally
+ look in the source tree sample 'etc' dir.
+
+ :param conf_src_dir: Directory in which to search first for conf file. May
+ be None
+ :param conf_file_name: Name of conf file
+ :param use_sample: If True and the conf_file_name is not found, then return
+ any sample conf file found in the source tree sample
+ 'etc' dir by appending '-sample' to conf_file_name
+ :returns: Path to conf file
+ :raises InProcessException: If no conf file is found
+ """
+ dflt_src_dir = os.path.normpath(os.path.join(os.path.abspath(__file__),
+ os.pardir, os.pardir, os.pardir,
+ 'etc'))
+ conf_src_dir = dflt_src_dir if conf_src_dir is None else conf_src_dir
+ conf_file_path = os.path.join(conf_src_dir, conf_file_name)
+ if os.path.exists(conf_file_path):
+ return conf_file_path
+
+ if use_sample:
+ # fall back to using the corresponding sample conf file
+ conf_file_name += '-sample'
+ conf_file_path = os.path.join(dflt_src_dir, conf_file_name)
+ if os.path.exists(conf_file_path):
+ return conf_file_path
+
+ msg = 'Failed to find config file %s' % conf_file_name
+ raise InProcessException(msg)
+
+
+def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
+ """
+ If SWIFT_TEST_POLICY is set:
+ - look in swift.conf file for specified policy
+ - move this to be policy-0 but preserving its options
+ - copy its ring file to test dir, changing its devices to suit
+ in process testing, and renaming it to suit policy-0
+ Otherwise, create a default ring file.
+ """
+ conf = ConfigParser()
+ conf.read(swift_conf)
+ sp_prefix = 'storage-policy:'
+
+ try:
+ # policy index 0 will be created if no policy exists in conf
+ policies = parse_storage_policies(conf)
+ except PolicyError as e:
+ raise InProcessException(e)
+
+ # clear all policies from test swift.conf before adding test policy back
+ for policy in policies:
+ conf.remove_section(sp_prefix + str(policy.idx))
+
+ policy_specified = os.environ.get('SWIFT_TEST_POLICY')
+ if policy_specified:
+ policy_to_test = policies.get_by_name(policy_specified)
+ if policy_to_test is None:
+ raise InProcessException('Failed to find policy name "%s"'
+ % policy_specified)
+ _info('Using specified policy %s' % policy_to_test.name)
+ else:
+ policy_to_test = policies.default
+ _info('Defaulting to policy %s' % policy_to_test.name)
+
+ # make policy_to_test be policy index 0 and default for the test config
+ sp_zero_section = sp_prefix + '0'
+ conf.add_section(sp_zero_section)
+ for (k, v) in policy_to_test.get_info(config=True).items():
+ conf.set(sp_zero_section, k, v)
+ conf.set(sp_zero_section, 'default', True)
+
+ with open(swift_conf, 'w') as fp:
+ conf.write(fp)
+
+ # look for a source ring file
+ ring_file_src = ring_file_test = 'object.ring.gz'
+ if policy_to_test.idx:
+ ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
+ try:
+ ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
+ use_sample=False)
+ except InProcessException as e:
+ if policy_specified:
+ raise InProcessException('Failed to find ring file %s'
+ % ring_file_src)
+ ring_file_src = None
+
+ ring_file_test = os.path.join(testdir, ring_file_test)
+ if ring_file_src:
+ # copy source ring file to a policy-0 test ring file, re-homing servers
+ _info('Using source ring file %s' % ring_file_src)
+ ring_data = ring.RingData.load(ring_file_src)
+ obj_sockets = []
+ for dev in ring_data.devs:
+ device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
+ utils.mkdirs(os.path.join(_testdir, 'sda1'))
+ utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
+ obj_socket = eventlet.listen(('localhost', 0))
+ obj_sockets.append(obj_socket)
+ dev['port'] = obj_socket.getsockname()[1]
+ dev['ip'] = '127.0.0.1'
+ dev['device'] = device
+ dev['replication_port'] = dev['port']
+ dev['replication_ip'] = dev['ip']
+ ring_data.save(ring_file_test)
+ else:
+ # make default test ring, 2 replicas, 4 partitions, 2 devices
+ _info('No source object ring file, creating 2rep/4part/2dev ring')
+ obj_sockets = [eventlet.listen(('localhost', 0)) for _ in (0, 1)]
+ ring_data = ring.RingData(
+ [[0, 1, 0, 1], [1, 0, 1, 0]],
+ [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
+ 'port': obj_sockets[0].getsockname()[1]},
+ {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
+ 'port': obj_sockets[1].getsockname()[1]}],
+ 30)
+ with closing(GzipFile(ring_file_test, 'wb')) as f:
+ pickle.dump(ring_data, f)
+
+ for dev in ring_data.devs:
+ _debug('Ring file dev: %s' % dev)
+
+ return obj_sockets
+
+
+def in_process_setup(the_object_server=object_server):
+ _info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
+ _info('Using object_server class: %s' % the_object_server.__name__)
+ conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')
+
+ if conf_src_dir is not None:
+ if not os.path.isdir(conf_src_dir):
+ msg = 'Config source %s is not a dir' % conf_src_dir
+ raise InProcessException(msg)
+ _info('Using config source dir: %s' % conf_src_dir)
+
+    # If SWIFT_TEST_IN_PROCESS_CONF_DIR specifies a config source dir then
+ # prefer config files from there, otherwise read config from source tree
+ # sample files. A mixture of files from the two sources is allowed.
+ proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
+ _info('Using proxy config from %s' % proxy_conf)
+ swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
+ _info('Using swift config from %s' % swift_conf_src)
+
+ monkey_patch_mimetools()
+
+ global _testdir
+ _testdir = os.path.join(mkdtemp(), 'tmp_functional')
+ utils.mkdirs(_testdir)
+ rmtree(_testdir)
+ utils.mkdirs(os.path.join(_testdir, 'sda1'))
+ utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
+ utils.mkdirs(os.path.join(_testdir, 'sdb1'))
+ utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
+
+ swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
+ obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)
+
+ global orig_swift_conf_name
+ orig_swift_conf_name = utils.SWIFT_CONF_FILE
+ utils.SWIFT_CONF_FILE = swift_conf
+ constraints.reload_constraints()
+ storage_policy.SWIFT_CONF_FILE = swift_conf
+ storage_policy.reload_storage_policies()
+ global config
+ if constraints.SWIFT_CONSTRAINTS_LOADED:
+ # Use the swift constraints that are loaded for the test framework
+ # configuration
+ _c = dict((k, str(v))
+ for k, v in constraints.EFFECTIVE_CONSTRAINTS.items())
+ config.update(_c)
+ else:
+        # In-process swift constraints were not loaded, something's wrong
+ raise SkipTest
+ global orig_hash_path_suff_pref
+ orig_hash_path_suff_pref = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
+ utils.validate_hash_conf()
+
+ # We create the proxy server listening socket to get its port number so
+ # that we can add it as the "auth_port" value for the functional test
+ # clients.
+ prolis = eventlet.listen(('localhost', 0))
+
+ # The following set of configuration values is used both for the
+    # functional test framework and for the various proxy, account, container
+ # and object servers.
+ config.update({
+ # Values needed by the various in-process swift servers
+ 'devices': _testdir,
+ 'swift_dir': _testdir,
+ 'mount_check': 'false',
+ 'client_timeout': '4',
+ 'allow_account_management': 'true',
+ 'account_autocreate': 'true',
+ 'allow_versions': 'True',
+ # Below are values used by the functional test framework, as well as
+ # by the various in-process swift servers
+ 'auth_host': '127.0.0.1',
+ 'auth_port': str(prolis.getsockname()[1]),
+ 'auth_ssl': 'no',
+ 'auth_prefix': '/auth/',
+ # Primary functional test account (needs admin access to the
+ # account)
+ 'account': 'test',
+ 'username': 'tester',
+ 'password': 'testing',
+ # User on a second account (needs admin access to the account)
+ 'account2': 'test2',
+ 'username2': 'tester2',
+ 'password2': 'testing2',
+ # User on same account as first, but without admin access
+ 'username3': 'tester3',
+ 'password3': 'testing3',
+ # Service user and prefix (emulates glance, cinder, etc. user)
+ 'account5': 'test5',
+ 'username5': 'tester5',
+ 'password5': 'testing5',
+ 'service_prefix': 'SERVICE',
+ # For tempauth middleware. Update reseller_prefix
+ 'reseller_prefix': 'AUTH, SERVICE',
+ 'SERVICE_require_group': 'service'
+ })
+
+ acc1lis = eventlet.listen(('localhost', 0))
+ acc2lis = eventlet.listen(('localhost', 0))
+ con1lis = eventlet.listen(('localhost', 0))
+ con2lis = eventlet.listen(('localhost', 0))
+
+ account_ring_path = os.path.join(_testdir, 'account.ring.gz')
+ with closing(GzipFile(account_ring_path, 'wb')) as f:
+ pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
+ [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
+ 'port': acc1lis.getsockname()[1]},
+ {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
+ 'port': acc2lis.getsockname()[1]}], 30),
+ f)
+ container_ring_path = os.path.join(_testdir, 'container.ring.gz')
+ with closing(GzipFile(container_ring_path, 'wb')) as f:
+ pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
+ [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
+ 'port': con1lis.getsockname()[1]},
+ {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
+ 'port': con2lis.getsockname()[1]}], 30),
+ f)
+
+ eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
+ # Turn off logging requests by the underlying WSGI software.
+ eventlet.wsgi.HttpProtocol.log_request = lambda *a: None
+ logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')
+ # Redirect logging other messages by the underlying WSGI software.
+ eventlet.wsgi.HttpProtocol.log_message = \
+ lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
+ # Default to only 4 seconds for in-process functional test runs
+ eventlet.wsgi.WRITE_TIMEOUT = 4
+
+ acc1srv = account_server.AccountController(
+ config, logger=debug_logger('acct1'))
+ acc2srv = account_server.AccountController(
+ config, logger=debug_logger('acct2'))
+ con1srv = container_server.ContainerController(
+ config, logger=debug_logger('cont1'))
+ con2srv = container_server.ContainerController(
+ config, logger=debug_logger('cont2'))
+
+ objsrvs = [
+ (obj_sockets[index],
+ the_object_server.ObjectController(
+ config, logger=debug_logger('obj%d' % (index + 1))))
+ for index in range(len(obj_sockets))
+ ]
+
+ logger = debug_logger('proxy')
+
+ def get_logger(name, *args, **kwargs):
+ return logger
+
+ with mock.patch('swift.common.utils.get_logger', get_logger):
+ with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
+ FakeMemcacheMiddleware):
+ try:
+ app = loadapp(proxy_conf, global_conf=config)
+ except Exception as e:
+ raise InProcessException(e)
+
+ nl = utils.NullLogger()
+ prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
+ acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl)
+ acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
+ con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
+ con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)
+
+ objspa = [eventlet.spawn(eventlet.wsgi.server, objsrv[0], objsrv[1], nl)
+ for objsrv in objsrvs]
+
+ global _test_coros
+ _test_coros = \
+ (prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)
+
+ # Create accounts "test" and "test2"
+ def create_account(act):
+ ts = utils.normalize_timestamp(time())
+ account_ring = Ring(_testdir, ring_name='account')
+ partition, nodes = account_ring.get_nodes(act)
+ for node in nodes:
+ # Note: we are just using the http_connect method in the object
+ # controller here to talk to the account server nodes.
+ conn = swift.proxy.controllers.obj.http_connect(
+ node['ip'], node['port'], node['device'], partition, 'PUT',
+ '/' + act, {'X-Timestamp': ts, 'x-trans-id': act})
+ resp = conn.getresponse()
+ assert(resp.status == 201)
+
+ create_account('AUTH_test')
+ create_account('AUTH_test2')
+
+cluster_info = {}
+
+
+def get_cluster_info():
+ # The fallback constraints used for testing will come from the current
+ # effective constraints.
+ eff_constraints = dict(constraints.EFFECTIVE_CONSTRAINTS)
+
+ # We'll update those constraints based on what the /info API provides, if
+ # anything.
+ global cluster_info
+ try:
+ conn = Connection(config)
+ conn.authenticate()
+ cluster_info.update(conn.cluster_info())
+ except (ResponseError, socket.error):
+        # Failed to get cluster information via /info API, so fall back on
+ # test.conf data
+ pass
+ else:
+ try:
+ eff_constraints.update(cluster_info['swift'])
+ except KeyError:
+ # Most likely the swift cluster has "expose_info = false" set
+ # in its proxy-server.conf file, so we'll just do the best we
+ # can.
+ print >>sys.stderr, "** Swift Cluster not exposing /info **"
+
+ # Finally, we'll allow any constraint present in the swift-constraints
+ # section of test.conf to override everything. Note that only those
+ # constraints defined in the constraints module are converted to integers.
+ test_constraints = get_config('swift-constraints')
+ for k in constraints.DEFAULT_CONSTRAINTS:
+ try:
+ test_constraints[k] = int(test_constraints[k])
+ except KeyError:
+ pass
+ except ValueError:
+ print >>sys.stderr, "Invalid constraint value: %s = %s" % (
+ k, test_constraints[k])
+ eff_constraints.update(test_constraints)
+
+ # Just make it look like these constraints were loaded from a /info call,
+ # even if the /info call failed, or when they are overridden by values
+ # from the swift-constraints section of test.conf
+ cluster_info['swift'] = eff_constraints
+
+
+def setup_package():
+ in_process_env = os.environ.get('SWIFT_TEST_IN_PROCESS')
+ if in_process_env is not None:
+ use_in_process = utils.config_true_value(in_process_env)
+ else:
+ use_in_process = None
+
+ global in_process
+
+ if use_in_process:
+ # Explicitly set to True, so barrel on ahead with in-process
+ # functional test setup.
+ in_process = True
+        # NOTE: No attempt is made to read a local test.conf file.
+ else:
+ if use_in_process is None:
+ # Not explicitly set, default to using in-process functional tests
+ # if the test.conf file is not found, or does not provide a usable
+ # configuration.
+ config.update(get_config('func_test'))
+ if config:
+ in_process = False
+ else:
+ in_process = True
+ else:
+ # Explicitly set to False, do not attempt to use in-process
+ # functional tests, be sure we attempt to read from local
+ # test.conf file.
+ in_process = False
+ config.update(get_config('func_test'))
+
+ if in_process:
+ in_mem_obj_env = os.environ.get('SWIFT_TEST_IN_MEMORY_OBJ')
+ in_mem_obj = utils.config_true_value(in_mem_obj_env)
+ try:
+ in_process_setup(the_object_server=(
+ mem_object_server if in_mem_obj else object_server))
+ except InProcessException as exc:
+ print >> sys.stderr, ('Exception during in-process setup: %s'
+ % str(exc))
+ raise
+
+ global web_front_end
+ web_front_end = config.get('web_front_end', 'integral')
+ global normalized_urls
+ normalized_urls = config.get('normalized_urls', False)
+
+ global orig_collate
+ orig_collate = locale.setlocale(locale.LC_COLLATE)
+ locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))
+
+ global insecure
+ insecure = config_true_value(config.get('insecure', False))
+
+ global swift_test_auth_version
+ global swift_test_auth
+ global swift_test_user
+ global swift_test_key
+ global swift_test_tenant
+ global swift_test_perm
+ global swift_test_domain
+ global swift_test_service_prefix
+
+ swift_test_service_prefix = None
+
+ if config:
+ swift_test_auth_version = str(config.get('auth_version', '1'))
+
+ swift_test_auth = 'http'
+ if config_true_value(config.get('auth_ssl', 'no')):
+ swift_test_auth = 'https'
+ if 'auth_prefix' not in config:
+ config['auth_prefix'] = '/'
+ try:
+ suffix = '://%(auth_host)s:%(auth_port)s%(auth_prefix)s' % config
+ swift_test_auth += suffix
+ except KeyError:
+ pass # skip
+
+ if 'service_prefix' in config:
+ swift_test_service_prefix = utils.append_underscore(
+ config['service_prefix'])
+
+ if swift_test_auth_version == "1":
+ swift_test_auth += 'v1.0'
+
+ try:
+ if 'account' in config:
+ swift_test_user[0] = '%(account)s:%(username)s' % config
+ else:
+ swift_test_user[0] = '%(username)s' % config
+ swift_test_key[0] = config['password']
+ except KeyError:
+ # bad config, no account/username configured, tests cannot be
+ # run
+ pass
+ try:
+ swift_test_user[1] = '%s%s' % (
+ '%s:' % config['account2'] if 'account2' in config else '',
+ config['username2'])
+ swift_test_key[1] = config['password2']
+ except KeyError:
+ pass # old config, no second account tests can be run
+ try:
+ swift_test_user[2] = '%s%s' % (
+ '%s:' % config['account'] if 'account'
+ in config else '', config['username3'])
+ swift_test_key[2] = config['password3']
+ except KeyError:
+ pass # old config, no third account tests can be run
+ try:
+ swift_test_user[4] = '%s%s' % (
+ '%s:' % config['account5'], config['username5'])
+ swift_test_key[4] = config['password5']
+ swift_test_tenant[4] = config['account5']
+ except KeyError:
+ pass # no service token tests can be run
+
+ for _ in range(3):
+ swift_test_perm[_] = swift_test_user[_]
+
+ else:
+ swift_test_user[0] = config['username']
+ swift_test_tenant[0] = config['account']
+ swift_test_key[0] = config['password']
+ swift_test_user[1] = config['username2']
+ swift_test_tenant[1] = config['account2']
+ swift_test_key[1] = config['password2']
+ swift_test_user[2] = config['username3']
+ swift_test_tenant[2] = config['account']
+ swift_test_key[2] = config['password3']
+ if 'username4' in config:
+ swift_test_user[3] = config['username4']
+ swift_test_tenant[3] = config['account4']
+ swift_test_key[3] = config['password4']
+ swift_test_domain[3] = config['domain4']
+ if 'username5' in config:
+ swift_test_user[4] = config['username5']
+ swift_test_tenant[4] = config['account5']
+ swift_test_key[4] = config['password5']
+
+ for _ in range(5):
+ swift_test_perm[_] = swift_test_tenant[_] + ':' \
+ + swift_test_user[_]
+
+ global skip
+ skip = not all([swift_test_auth, swift_test_user[0], swift_test_key[0]])
+ if skip:
+ print >>sys.stderr, 'SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG'
+
+ global skip2
+ skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]])
+ if not skip and skip2:
+ print >>sys.stderr, \
+ 'SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS' \
+ ' DUE TO NO CONFIG FOR THEM'
+
+ global skip3
+ skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]])
+ if not skip and skip3:
+ print >>sys.stderr, \
+ 'SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS DUE TO NO CONFIG FOR THEM'
+
+ global skip_if_not_v3
+ skip_if_not_v3 = (swift_test_auth_version != '3'
+ or not all([not skip,
+ swift_test_user[3],
+ swift_test_key[3]]))
+ if not skip and skip_if_not_v3:
+ print >>sys.stderr, \
+ 'SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3'
+
+ global skip_service_tokens
+ skip_service_tokens = not all([not skip, swift_test_user[4],
+ swift_test_key[4], swift_test_tenant[4],
+ swift_test_service_prefix])
+ if not skip and skip_service_tokens:
+ print >>sys.stderr, \
+ 'SKIPPING FUNCTIONAL TESTS SPECIFIC TO SERVICE TOKENS'
+
+ get_cluster_info()
+
+
+def teardown_package():
+ global orig_collate
+ locale.setlocale(locale.LC_COLLATE, orig_collate)
+
+ # clean up containers and objects left behind after running tests
+ conn = Connection(config)
+ conn.authenticate()
+ account = Account(conn, config.get('account', config['username']))
+ account.delete_containers()
+
+ global in_process
+ if in_process:
+ try:
+ for server in _test_coros:
+ server.kill()
+ except Exception:
+ pass
+ try:
+ rmtree(os.path.dirname(_testdir))
+ except Exception:
+ pass
+ utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX = \
+ orig_hash_path_suff_pref
+ utils.SWIFT_CONF_FILE = orig_swift_conf_name
+ constraints.reload_constraints()
+
+
+class AuthError(Exception):
+ pass
+
+
+class InternalServerError(Exception):
+ pass
+
+
+url = [None, None, None, None, None]
+token = [None, None, None, None, None]
+service_token = [None, None, None, None, None]
+parsed = [None, None, None, None, None]
+conn = [None, None, None, None, None]
+
+
+def connection(url):
+ if has_insecure:
+ return http_connection(url, insecure=insecure)
+ return http_connection(url)
+
+
+def get_url_token(user_index, os_options):
+ authargs = dict(snet=False,
+ tenant_name=swift_test_tenant[user_index],
+ auth_version=swift_test_auth_version,
+ os_options=os_options,
+ insecure=insecure)
+ return get_auth(swift_test_auth,
+ swift_test_user[user_index],
+ swift_test_key[user_index],
+ **authargs)
+
+
+def retry(func, *args, **kwargs):
+ """
+ You can use the kwargs to override:
+ 'retries' (default: 5)
+ 'use_account' (default: 1) - which user's token to pass
+ 'url_account' (default: matches 'use_account') - which user's storage URL
+ 'resource' (default: url[url_account] - URL to connect to; retry()
+ will interpolate the variable :storage_url: if present
+ 'service_user' - add a service token from this user (1 indexed)
+ """
+ global url, token, service_token, parsed, conn
+ retries = kwargs.get('retries', 5)
+ attempts, backoff = 0, 1
+
+ # use account #1 by default; turn user's 1-indexed account into 0-indexed
+ use_account = kwargs.pop('use_account', 1) - 1
+ service_user = kwargs.pop('service_user', None)
+ if service_user:
+ service_user -= 1 # 0-index
+
+ # access our own account by default
+ url_account = kwargs.pop('url_account', use_account + 1) - 1
+ os_options = {'user_domain_name': swift_test_domain[use_account],
+ 'project_domain_name': swift_test_domain[use_account]}
+ while attempts <= retries:
+ auth_failure = False
+ attempts += 1
+ try:
+ if not url[use_account] or not token[use_account]:
+ url[use_account], token[use_account] = get_url_token(
+ use_account, os_options)
+ parsed[use_account] = conn[use_account] = None
+ if not parsed[use_account] or not conn[use_account]:
+ parsed[use_account], conn[use_account] = \
+ connection(url[use_account])
+
+ # default resource is the account url[url_account]
+ resource = kwargs.pop('resource', '%(storage_url)s')
+ template_vars = {'storage_url': url[url_account]}
+ parsed_result = urlparse(resource % template_vars)
+ if isinstance(service_user, int):
+ if not service_token[service_user]:
+ dummy, service_token[service_user] = get_url_token(
+ service_user, os_options)
+ kwargs['service_token'] = service_token[service_user]
+ return func(url[url_account], token[use_account],
+ parsed_result, conn[url_account],
+ *args, **kwargs)
+ except (socket.error, HTTPException):
+ if attempts > retries:
+ raise
+ parsed[use_account] = conn[use_account] = None
+ if service_user:
+ service_token[service_user] = None
+ except AuthError:
+ auth_failure = True
+ url[use_account] = token[use_account] = None
+ if service_user:
+ service_token[service_user] = None
+ except InternalServerError:
+ pass
+ if attempts <= retries:
+ if not auth_failure:
+ sleep(backoff)
+ backoff *= 2
+ raise Exception('No result after %s retries.' % retries)
+
+
+def check_response(conn):
+ resp = conn.getresponse()
+ if resp.status == 401:
+ resp.read()
+ raise AuthError()
+ elif resp.status // 100 == 5:
+ resp.read()
+ raise InternalServerError()
+ return resp
+
+
+def load_constraint(name):
+ global cluster_info
+ try:
+ c = cluster_info['swift'][name]
+ except KeyError:
+ raise SkipTest("Missing constraint: %s" % name)
+ if not isinstance(c, int):
+ raise SkipTest("Bad value, %r, for constraint: %s" % (c, name))
+ return c
+
+
+def get_storage_policy_from_cluster_info(info):
+ policies = info['swift'].get('policies', {})
+ default_policy = []
+ non_default_policies = []
+ for p in policies:
+ if p.get('default', {}):
+ default_policy.append(p)
+ else:
+ non_default_policies.append(p)
+ return default_policy, non_default_policies
+
+
+def reset_acl():
+ def post(url, token, parsed, conn):
+ conn.request('POST', parsed.path, '', {
+ 'X-Auth-Token': token,
+ 'X-Account-Access-Control': '{}'
+ })
+ return check_response(conn)
+ resp = retry(post, use_account=1)
+ resp.read()
+
+
+def requires_acls(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ global skip, cluster_info
+ if skip or not cluster_info:
+ raise SkipTest('Requires account ACLs')
+ # Determine whether this cluster has account ACLs; if not, skip test
+ if not cluster_info.get('tempauth', {}).get('account_acls'):
+ raise SkipTest('Requires account ACLs')
+ if swift_test_auth_version != '1':
+ # remove when keystoneauth supports account acls
+ raise SkipTest('Requires account ACLs')
+ reset_acl()
+ try:
+ rv = f(*args, **kwargs)
+ finally:
+ reset_acl()
+ return rv
+ return wrapper
+
+
+class FunctionalStoragePolicyCollection(object):
+
+ def __init__(self, policies):
+ self._all = policies
+ self.default = None
+ for p in self:
+ if p.get('default', False):
+ assert self.default is None, 'Found multiple default ' \
+ 'policies %r and %r' % (self.default, p)
+ self.default = p
+
+ @classmethod
+ def from_info(cls, info=None):
+ if not (info or cluster_info):
+ get_cluster_info()
+ info = info or cluster_info
+ try:
+ policy_info = info['swift']['policies']
+ except KeyError:
+ raise AssertionError('Did not find any policy info in %r' % info)
+ policies = cls(policy_info)
+ assert policies.default, \
+ 'Did not find default policy in %r' % policy_info
+ return policies
+
+ def __len__(self):
+ return len(self._all)
+
+ def __iter__(self):
+ return iter(self._all)
+
+ def __getitem__(self, index):
+ return self._all[index]
+
+ def filter(self, **kwargs):
+ return self.__class__([p for p in self if all(
+ p.get(k) == v for k, v in kwargs.items())])
+
+ def exclude(self, **kwargs):
+ return self.__class__([p for p in self if all(
+ p.get(k) != v for k, v in kwargs.items())])
+
+ def select(self):
+ return random.choice(self)
+
+
+def requires_policies(f):
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ if skip:
+ raise SkipTest
+ try:
+ self.policies = FunctionalStoragePolicyCollection.from_info()
+ except AssertionError:
+ raise SkipTest("Unable to determine available policies")
+ if len(self.policies) < 2:
+ raise SkipTest("Multiple policies not enabled")
+ return f(self, *args, **kwargs)
+
+ return wrapper
diff --git a/test/functional/gluster_swift_tests.py b/test/functional/gluster_swift_tests.py
index b4514c9..2ffb841 100644
--- a/test/functional/gluster_swift_tests.py
+++ b/test/functional/gluster_swift_tests.py
@@ -19,8 +19,9 @@ import random
import os,sys,re,hashlib
from nose import SkipTest
-from test.functional.tests import config, locale, Base, Base2, Utils, \
+from test.functional.tests import Base, Base2, Utils, \
TestFileEnv
+from test.functional import config, locale
from test.functional.swift_test_client import Account, Connection, File, \
ResponseError
diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py
index 27e025b..5c0ab87 100644
--- a/test/functional/swift_test_client.py
+++ b/test/functional/swift_test_client.py
@@ -26,10 +26,16 @@ import simplejson as json
from nose import SkipTest
from xml.dom import minidom
+
from swiftclient import get_auth
+from swift.common import constraints
+from swift.common.utils import config_true_value
+
from test import safe_repr
+httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT
+
class AuthenticationFailed(Exception):
pass
@@ -103,11 +109,13 @@ class Connection(object):
def __init__(self, config):
for key in 'auth_host auth_port auth_ssl username password'.split():
if key not in config:
- raise SkipTest
+ raise SkipTest(
+ "Missing required configuration parameter: %s" % key)
self.auth_host = config['auth_host']
self.auth_port = int(config['auth_port'])
self.auth_ssl = config['auth_ssl'] in ('on', 'true', 'yes', '1')
+ self.insecure = config_true_value(config.get('insecure', 'false'))
self.auth_prefix = config.get('auth_prefix', '/')
self.auth_version = str(config.get('auth_version', '1'))
@@ -117,6 +125,7 @@ class Connection(object):
self.storage_host = None
self.storage_port = None
+ self.storage_url = None
self.conn_class = None
@@ -145,10 +154,11 @@ class Connection(object):
auth_netloc = "%s:%d" % (self.auth_host, self.auth_port)
auth_url = auth_scheme + auth_netloc + auth_path
+ authargs = dict(snet=False, tenant_name=self.account,
+ auth_version=self.auth_version, os_options={},
+ insecure=self.insecure)
(storage_url, storage_token) = get_auth(
- auth_url, auth_user, self.password, snet=False,
- tenant_name=self.account, auth_version=self.auth_version,
- os_options={})
+ auth_url, auth_user, self.password, **authargs)
if not (storage_url and storage_token):
raise AuthenticationFailed()
@@ -172,8 +182,14 @@ class Connection(object):
# unicode and this would cause troubles when doing
# no_safe_quote query.
self.storage_url = str('/%s/%s' % (x[3], x[4]))
-
- self.storage_token = storage_token
+ self.account_name = str(x[4])
+ self.auth_user = auth_user
+            # With v2 keystone, storage_token is unicode.
+            # We want it to be a plain string, otherwise requests whose
+            # headers contain already-encoded non-ASCII characters
+            # would run into trouble.
+ self.storage_token = str(storage_token)
+ self.user_acl = '%s:%s' % (self.account, self.username)
self.http_connect()
return self.storage_url, self.storage_token
@@ -184,7 +200,7 @@ class Connection(object):
"""
status = self.make_request('GET', '/info',
cfg={'absolute_path': True})
- if status == 404:
+ if status // 100 == 4:
return {}
if not 200 <= status <= 299:
raise ResponseError(self.response, 'GET', '/info')
@@ -195,7 +211,12 @@ class Connection(object):
port=self.storage_port)
#self.connection.set_debuglevel(3)
- def make_path(self, path=[], cfg={}):
+ def make_path(self, path=None, cfg=None):
+ if path is None:
+ path = []
+ if cfg is None:
+ cfg = {}
+
if cfg.get('version_only_path'):
return '/' + self.storage_url.split('/')[1]
@@ -208,7 +229,9 @@ class Connection(object):
else:
return self.storage_url
- def make_headers(self, hdrs, cfg={}):
+ def make_headers(self, hdrs, cfg=None):
+ if cfg is None:
+ cfg = {}
headers = {}
if not cfg.get('no_auth_token'):
@@ -218,8 +241,16 @@ class Connection(object):
headers.update(hdrs)
return headers
- def make_request(self, method, path=[], data='', hdrs={}, parms={},
- cfg={}):
+ def make_request(self, method, path=None, data='', hdrs=None, parms=None,
+ cfg=None):
+ if path is None:
+ path = []
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
if not cfg.get('absolute_path'):
# Set absolute_path=True to make a request to exactly the given
# path, not storage path + given path. Useful for
@@ -277,7 +308,14 @@ class Connection(object):
'Attempts: %s, Failures: %s' %
(request, len(fail_messages), fail_messages))
- def put_start(self, path, hdrs={}, parms={}, cfg={}, chunked=False):
+ def put_start(self, path, hdrs=None, parms=None, cfg=None, chunked=False):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
+
self.http_connect()
path = self.make_path(path, cfg)
@@ -322,7 +360,10 @@ class Base(object):
def __str__(self):
return self.name
- def header_fields(self, required_fields, optional_fields=()):
+ def header_fields(self, required_fields, optional_fields=None):
+ if optional_fields is None:
+ optional_fields = ()
+
headers = dict(self.conn.response.getheaders())
ret = {}
@@ -352,7 +393,11 @@ class Account(Base):
self.conn = conn
self.name = str(name)
- def update_metadata(self, metadata={}, cfg={}):
+ def update_metadata(self, metadata=None, cfg=None):
+ if metadata is None:
+ metadata = {}
+ if cfg is None:
+ cfg = {}
headers = dict(("X-Account-Meta-%s" % k, v)
for k, v in metadata.items())
@@ -365,7 +410,14 @@ class Account(Base):
def container(self, container_name):
return Container(self.conn, self.name, container_name)
- def containers(self, hdrs={}, parms={}, cfg={}):
+ def containers(self, hdrs=None, parms=None, cfg=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
+
format_type = parms.get('format', None)
if format_type not in [None, 'json', 'xml']:
raise RequestError('Invalid format: %s' % format_type)
@@ -411,7 +463,13 @@ class Account(Base):
return listing_empty(self.containers)
- def info(self, hdrs={}, parms={}, cfg={}):
+ def info(self, hdrs=None, parms=None, cfg=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
parms=parms, cfg=cfg) != 204:
@@ -435,11 +493,21 @@ class Container(Base):
self.account = str(account)
self.name = str(name)
- def create(self, hdrs={}, parms={}, cfg={}):
+ def create(self, hdrs=None, parms=None, cfg=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
return self.conn.make_request('PUT', self.path, hdrs=hdrs,
parms=parms, cfg=cfg) in (201, 202)
- def delete(self, hdrs={}, parms={}):
+ def delete(self, hdrs=None, parms=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
return self.conn.make_request('DELETE', self.path, hdrs=hdrs,
parms=parms) == 204
@@ -457,7 +525,13 @@ class Container(Base):
def file(self, file_name):
return File(self.conn, self.account, self.name, file_name)
- def files(self, hdrs={}, parms={}, cfg={}):
+ def files(self, hdrs=None, parms=None, cfg=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
format_type = parms.get('format', None)
if format_type not in [None, 'json', 'xml']:
raise RequestError('Invalid format: %s' % format_type)
@@ -507,14 +581,23 @@ class Container(Base):
raise ResponseError(self.conn.response, 'GET',
self.conn.make_path(self.path))
- def info(self, hdrs={}, parms={}, cfg={}):
+ def info(self, hdrs=None, parms=None, cfg=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
self.conn.make_request('HEAD', self.path, hdrs=hdrs,
parms=parms, cfg=cfg)
if self.conn.response.status == 204:
required_fields = [['bytes_used', 'x-container-bytes-used'],
['object_count', 'x-container-object-count']]
- optional_fields = [['versions', 'x-versions-location']]
+ optional_fields = [
+ ['versions', 'x-versions-location'],
+ ['tempurl_key', 'x-container-meta-temp-url-key'],
+ ['tempurl_key2', 'x-container-meta-temp-url-key-2']]
return self.header_fields(required_fields, optional_fields)
@@ -538,7 +621,9 @@ class File(Base):
self.size = None
self.metadata = {}
- def make_headers(self, cfg={}):
+ def make_headers(self, cfg=None):
+ if cfg is None:
+ cfg = {}
headers = {}
if not cfg.get('no_content_length'):
if cfg.get('set_content_length'):
@@ -580,7 +665,13 @@ class File(Base):
data.seek(0)
return checksum.hexdigest()
- def copy(self, dest_cont, dest_file, hdrs={}, parms={}, cfg={}):
+ def copy(self, dest_cont, dest_file, hdrs=None, parms=None, cfg=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
if 'destination' in cfg:
headers = {'Destination': cfg['destination']}
elif cfg.get('no_destination'):
@@ -595,7 +686,37 @@ class File(Base):
return self.conn.make_request('COPY', self.path, hdrs=headers,
parms=parms) == 201
- def delete(self, hdrs={}, parms={}):
+ def copy_account(self, dest_account, dest_cont, dest_file,
+ hdrs=None, parms=None, cfg=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
+ if 'destination' in cfg:
+ headers = {'Destination': cfg['destination']}
+ elif cfg.get('no_destination'):
+ headers = {}
+ else:
+ headers = {'Destination-Account': dest_account,
+ 'Destination': '%s/%s' % (dest_cont, dest_file)}
+ headers.update(hdrs)
+
+ if 'Destination-Account' in headers:
+ headers['Destination-Account'] = \
+ urllib.quote(headers['Destination-Account'])
+ if 'Destination' in headers:
+ headers['Destination'] = urllib.quote(headers['Destination'])
+
+ return self.conn.make_request('COPY', self.path, hdrs=headers,
+ parms=parms) == 201
+
+ def delete(self, hdrs=None, parms=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
if self.conn.make_request('DELETE', self.path, hdrs=hdrs,
parms=parms) != 204:
@@ -604,7 +725,13 @@ class File(Base):
return True
- def info(self, hdrs={}, parms={}, cfg={}):
+ def info(self, hdrs=None, parms=None, cfg=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
parms=parms, cfg=cfg) != 200:
@@ -615,8 +742,8 @@ class File(Base):
['content_type', 'content-type'],
['last_modified', 'last-modified'],
['etag', 'etag']]
-
- optional_fields = [['x_delete_at', 'x-delete-at'],
+ optional_fields = [['x_object_manifest', 'x-object-manifest'],
+ ['x_delete_at', 'x-delete-at'],
['x_delete_after', 'x-delete-after']]
header_fields = self.header_fields(fields,
@@ -624,7 +751,11 @@ class File(Base):
header_fields['etag'] = header_fields['etag'].strip('"')
return header_fields
- def initialize(self, hdrs={}, parms={}):
+ def initialize(self, hdrs=None, parms=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
if not self.name:
return False
@@ -669,7 +800,11 @@ class File(Base):
return data
def read(self, size=-1, offset=0, hdrs=None, buffer=None,
- callback=None, cfg={}, parms={}):
+ callback=None, cfg=None, parms=None):
+ if cfg is None:
+ cfg = {}
+ if parms is None:
+ parms = {}
if size > 0:
range_string = 'bytes=%d-%d' % (offset, (offset + size) - 1)
@@ -726,7 +861,12 @@ class File(Base):
finally:
fobj.close()
- def sync_metadata(self, metadata={}, cfg={}):
+ def sync_metadata(self, metadata=None, cfg=None):
+ if metadata is None:
+ metadata = {}
+ if cfg is None:
+ cfg = {}
+
self.metadata.update(metadata)
if self.metadata:
@@ -737,6 +877,7 @@ class File(Base):
cfg.get('set_content_length')
else:
headers['Content-Length'] = 0
+
self.conn.make_request('POST', self.path, hdrs=headers, cfg=cfg)
if self.conn.response.status not in (201, 202):
@@ -745,7 +886,14 @@ class File(Base):
return True
- def chunked_write(self, data=None, hdrs={}, parms={}, cfg={}):
+ def chunked_write(self, data=None, hdrs=None, parms=None, cfg=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
+
if data is not None and self.chunked_write_in_progress:
self.conn.put_data(data, True)
elif data is not None:
@@ -764,8 +912,15 @@ class File(Base):
else:
raise RuntimeError
- def write(self, data='', hdrs={}, parms={}, callback=None, cfg={},
+ def write(self, data='', hdrs=None, parms=None, callback=None, cfg=None,
return_resp=False):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
+
block_size = 2 ** 20
if isinstance(data, file):
@@ -786,13 +941,15 @@ class File(Base):
transferred = 0
buff = data.read(block_size)
+ buff_len = len(buff)
try:
- while len(buff) > 0:
+ while buff_len > 0:
self.conn.put_data(buff)
- buff = data.read(block_size)
- transferred += len(buff)
+ transferred += buff_len
if callable(callback):
callback(transferred, self.size)
+ buff = data.read(block_size)
+ buff_len = len(buff)
self.conn.put_end()
except socket.timeout as err:
@@ -814,7 +971,14 @@ class File(Base):
return True
- def write_random(self, size=None, hdrs={}, parms={}, cfg={}):
+ def write_random(self, size=None, hdrs=None, parms=None, cfg=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
+
data = self.random_data(size)
if not self.write(data, hdrs=hdrs, parms=parms, cfg=cfg):
raise ResponseError(self.conn.response, 'PUT',
@@ -822,7 +986,15 @@ class File(Base):
self.md5 = self.compute_md5sum(StringIO.StringIO(data))
return data
- def write_random_return_resp(self, size=None, hdrs={}, parms={}, cfg={}):
+ def write_random_return_resp(self, size=None, hdrs=None, parms=None,
+ cfg=None):
+ if hdrs is None:
+ hdrs = {}
+ if parms is None:
+ parms = {}
+ if cfg is None:
+ cfg = {}
+
data = self.random_data(size)
resp = self.write(data, hdrs=hdrs, parms=parms, cfg=cfg,
return_resp=True)
diff --git a/test/functional/test_account.py b/test/functional/test_account.py
index 1cc61bc..30a8e74 100755
--- a/test/functional/test_account.py
+++ b/test/functional/test_account.py
@@ -21,13 +21,11 @@ from uuid import uuid4
from nose import SkipTest
from string import letters
-from swift.common.constraints import MAX_META_COUNT, MAX_META_NAME_LENGTH, \
- MAX_META_OVERALL_SIZE, MAX_META_VALUE_LENGTH
from swift.common.middleware.acl import format_acl
-from swift_testing import (check_response, retry, skip, skip2, skip3,
- web_front_end, requires_acls)
-import swift_testing
-from test.functional.tests import load_constraint
+
+from test.functional import check_response, retry, requires_acls, \
+ load_constraint
+import test.functional as tf
class TestAccount(unittest.TestCase):
@@ -69,7 +67,7 @@ class TestAccount(unittest.TestCase):
self.assertEqual(resp.status // 100, 2)
def test_metadata(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, value):
@@ -109,6 +107,9 @@ class TestAccount(unittest.TestCase):
self.assertEqual(resp.getheader('x-account-meta-test'), 'Value')
def test_invalid_acls(self):
+ if tf.skip:
+ raise SkipTest
+
def post(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
@@ -145,7 +146,7 @@ class TestAccount(unittest.TestCase):
resp.read()
self.assertEqual(resp.status, 400)
- acl_user = swift_testing.swift_test_user[1]
+ acl_user = tf.swift_test_user[1]
acl = {'admin': [acl_user], 'invalid_key': 'invalid_value'}
headers = {'x-account-access-control': format_acl(
version=2, acl_dict=acl)}
@@ -173,7 +174,7 @@ class TestAccount(unittest.TestCase):
@requires_acls
def test_read_only_acl(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn):
@@ -191,7 +192,7 @@ class TestAccount(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant read access
- acl_user = swift_testing.swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': format_acl(
version=2, acl_dict=acl)}
@@ -224,7 +225,7 @@ class TestAccount(unittest.TestCase):
@requires_acls
def test_read_write_acl(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn):
@@ -242,7 +243,7 @@ class TestAccount(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant read-write access
- acl_user = swift_testing.swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': format_acl(
version=2, acl_dict=acl)}
@@ -265,7 +266,7 @@ class TestAccount(unittest.TestCase):
@requires_acls
def test_admin_acl(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn):
@@ -283,7 +284,7 @@ class TestAccount(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant admin access
- acl_user = swift_testing.swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'admin': [acl_user]}
acl_json_str = format_acl(version=2, acl_dict=acl)
headers = {'x-account-access-control': acl_json_str}
@@ -323,7 +324,7 @@ class TestAccount(unittest.TestCase):
@requires_acls
def test_protected_tempurl(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn):
@@ -335,7 +336,7 @@ class TestAccount(unittest.TestCase):
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
- # add a account metadata, and temp-url-key to account
+ # add an account metadata, and temp-url-key to account
value = str(uuid4())
headers = {
'x-account-meta-temp-url-key': 'secret',
@@ -346,7 +347,7 @@ class TestAccount(unittest.TestCase):
self.assertEqual(resp.status, 204)
# grant read-only access to tester3
- acl_user = swift_testing.swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-only': [acl_user]}
acl_json_str = format_acl(version=2, acl_dict=acl)
headers = {'x-account-access-control': acl_json_str}
@@ -364,7 +365,7 @@ class TestAccount(unittest.TestCase):
self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None)
# grant read-write access to tester3
- acl_user = swift_testing.swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-write': [acl_user]}
acl_json_str = format_acl(version=2, acl_dict=acl)
headers = {'x-account-access-control': acl_json_str}
@@ -382,7 +383,7 @@ class TestAccount(unittest.TestCase):
self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None)
# grant admin access to tester3
- acl_user = swift_testing.swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'admin': [acl_user]}
acl_json_str = format_acl(version=2, acl_dict=acl)
headers = {'x-account-access-control': acl_json_str}
@@ -417,7 +418,7 @@ class TestAccount(unittest.TestCase):
@requires_acls
def test_account_acls(self):
- if skip2:
+ if tf.skip2:
raise SkipTest
def post(url, token, parsed, conn, headers):
@@ -464,7 +465,7 @@ class TestAccount(unittest.TestCase):
# User1 is swift_owner of their own account, so they can POST an
# ACL -- let's do this and make User2 (test_user[1]) an admin
- acl_user = swift_testing.swift_test_user[1]
+ acl_user = tf.swift_test_user[1]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': format_acl(
version=2, acl_dict=acl)}
@@ -541,7 +542,7 @@ class TestAccount(unittest.TestCase):
@requires_acls
def test_swift_account_acls(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, headers):
@@ -604,7 +605,7 @@ class TestAccount(unittest.TestCase):
resp.read()
def test_swift_prohibits_garbage_account_acls(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, headers):
@@ -671,7 +672,7 @@ class TestAccount(unittest.TestCase):
resp.read()
def test_unicode_metadata(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, name, value):
@@ -684,7 +685,7 @@ class TestAccount(unittest.TestCase):
return check_response(conn)
uni_key = u'X-Account-Meta-uni\u0E12'
uni_value = u'uni\u0E12'
- if (web_front_end == 'integral'):
+ if (tf.web_front_end == 'integral'):
resp = retry(post, uni_key, '1')
resp.read()
self.assertTrue(resp.status in (201, 204))
@@ -700,7 +701,7 @@ class TestAccount(unittest.TestCase):
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('X-Account-Meta-uni'),
uni_value.encode('utf-8'))
- if (web_front_end == 'integral'):
+ if (tf.web_front_end == 'integral'):
resp = retry(post, uni_key, uni_value)
resp.read()
self.assertEqual(resp.status, 204)
@@ -711,7 +712,7 @@ class TestAccount(unittest.TestCase):
uni_value.encode('utf-8'))
def test_multi_metadata(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, name, value):
@@ -740,7 +741,7 @@ class TestAccount(unittest.TestCase):
self.assertEqual(resp.getheader('x-account-meta-two'), '2')
def test_bad_metadata(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, extra_headers):
@@ -750,27 +751,31 @@ class TestAccount(unittest.TestCase):
return check_response(conn)
resp = retry(post,
- {'X-Account-Meta-' + ('k' * MAX_META_NAME_LENGTH): 'v'})
+ {'X-Account-Meta-' + (
+ 'k' * self.max_meta_name_length): 'v'})
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(
post,
- {'X-Account-Meta-' + ('k' * (MAX_META_NAME_LENGTH + 1)): 'v'})
+ {'X-Account-Meta-' + ('k' * (
+ self.max_meta_name_length + 1)): 'v'})
resp.read()
self.assertEqual(resp.status, 400)
resp = retry(post,
- {'X-Account-Meta-Too-Long': 'k' * MAX_META_VALUE_LENGTH})
+ {'X-Account-Meta-Too-Long': (
+ 'k' * self.max_meta_value_length)})
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(
post,
- {'X-Account-Meta-Too-Long': 'k' * (MAX_META_VALUE_LENGTH + 1)})
+ {'X-Account-Meta-Too-Long': 'k' * (
+ self.max_meta_value_length + 1)})
resp.read()
self.assertEqual(resp.status, 400)
def test_bad_metadata2(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, extra_headers):
@@ -785,20 +790,20 @@ class TestAccount(unittest.TestCase):
resp = retry(post, headers)
headers = {}
- for x in xrange(MAX_META_COUNT):
+ for x in xrange(self.max_meta_count):
headers['X-Account-Meta-%d' % x] = 'v'
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 204)
headers = {}
- for x in xrange(MAX_META_COUNT + 1):
+ for x in xrange(self.max_meta_count + 1):
headers['X-Account-Meta-%d' % x] = 'v'
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 400)
def test_bad_metadata3(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, extra_headers):
@@ -807,31 +812,55 @@ class TestAccount(unittest.TestCase):
conn.request('POST', parsed.path, '', headers)
return check_response(conn)
- # TODO: Find the test that adds these and remove them.
- headers = {'x-remove-account-meta-temp-url-key': 'remove',
- 'x-remove-account-meta-temp-url-key-2': 'remove'}
- resp = retry(post, headers)
-
headers = {}
- header_value = 'k' * MAX_META_VALUE_LENGTH
+ header_value = 'k' * self.max_meta_value_length
size = 0
x = 0
- while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
- size += 4 + MAX_META_VALUE_LENGTH
+ while size < (self.max_meta_overall_size - 4
+ - self.max_meta_value_length):
+ size += 4 + self.max_meta_value_length
headers['X-Account-Meta-%04d' % x] = header_value
x += 1
- if MAX_META_OVERALL_SIZE - size > 1:
+ if self.max_meta_overall_size - size > 1:
headers['X-Account-Meta-k'] = \
- 'v' * (MAX_META_OVERALL_SIZE - size - 1)
+ 'v' * (self.max_meta_overall_size - size - 1)
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 204)
headers['X-Account-Meta-k'] = \
- 'v' * (MAX_META_OVERALL_SIZE - size)
+ 'v' * (self.max_meta_overall_size - size)
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 400)
+class TestAccountInNonDefaultDomain(unittest.TestCase):
+ def setUp(self):
+ if tf.skip or tf.skip2 or tf.skip_if_not_v3:
+ raise SkipTest('AUTH VERSION 3 SPECIFIC TEST')
+
+ def test_project_domain_id_header(self):
+ # make sure account exists (assumes account auto create)
+ def post(url, token, parsed, conn):
+ conn.request('POST', parsed.path, '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+
+ resp = retry(post, use_account=4)
+ resp.read()
+ self.assertEqual(resp.status, 204)
+
+ # account in non-default domain should have a project domain id
+ def head(url, token, parsed, conn):
+ conn.request('HEAD', parsed.path, '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+
+ resp = retry(head, use_account=4)
+ resp.read()
+ self.assertEqual(resp.status, 204)
+ self.assertTrue('X-Account-Project-Domain-Id' in resp.headers)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/functional/test_container.py b/test/functional/test_container.py
index 91702e9..d7896a4 100755
--- a/test/functional/test_container.py
+++ b/test/functional/test_container.py
@@ -20,19 +20,19 @@ import unittest
from nose import SkipTest
from uuid import uuid4
-from swift.common.constraints import MAX_META_COUNT, MAX_META_NAME_LENGTH, \
- MAX_META_OVERALL_SIZE, MAX_META_VALUE_LENGTH
-
-from swift_testing import check_response, retry, skip, skip2, skip3, \
- swift_test_perm, web_front_end, requires_acls, swift_test_user
+from test.functional import check_response, retry, requires_acls, \
+ load_constraint, requires_policies
+import test.functional as tf
class TestContainer(unittest.TestCase):
def setUp(self):
- if skip:
+ if tf.skip:
raise SkipTest
self.name = uuid4().hex
+ # this container isn't created by default, but will be cleaned up
+ self.container = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', parsed.path + '/' + self.name, '',
@@ -43,44 +43,58 @@ class TestContainer(unittest.TestCase):
resp.read()
self.assertEqual(resp.status, 201)
+ self.max_meta_count = load_constraint('max_meta_count')
+ self.max_meta_name_length = load_constraint('max_meta_name_length')
+ self.max_meta_overall_size = load_constraint('max_meta_overall_size')
+ self.max_meta_value_length = load_constraint('max_meta_value_length')
+
def tearDown(self):
- if skip:
+ if tf.skip:
raise SkipTest
- def get(url, token, parsed, conn):
- conn.request('GET', parsed.path + '/' + self.name + '?format=json',
- '', {'X-Auth-Token': token})
- return check_response(conn)
-
- def delete(url, token, parsed, conn, obj):
- conn.request('DELETE',
- '/'.join([parsed.path, self.name, obj['name']]), '',
- {'X-Auth-Token': token})
- return check_response(conn)
-
- while True:
- resp = retry(get)
- body = resp.read()
- self.assert_(resp.status // 100 == 2, resp.status)
- objs = json.loads(body)
- if not objs:
- break
- for obj in objs:
- resp = retry(delete, obj)
- resp.read()
- self.assertEqual(resp.status, 204)
-
- def delete(url, token, parsed, conn):
- conn.request('DELETE', parsed.path + '/' + self.name, '',
+ def get(url, token, parsed, conn, container):
+ conn.request(
+ 'GET', parsed.path + '/' + container + '?format=json', '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+
+ def delete(url, token, parsed, conn, container, obj):
+ conn.request(
+ 'DELETE', '/'.join([parsed.path, container, obj['name']]), '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+
+ for container in (self.name, self.container):
+ while True:
+ resp = retry(get, container)
+ body = resp.read()
+ if resp.status == 404:
+ break
+ self.assert_(resp.status // 100 == 2, resp.status)
+ objs = json.loads(body)
+ if not objs:
+ break
+ for obj in objs:
+ resp = retry(delete, container, obj)
+ resp.read()
+ self.assertEqual(resp.status, 204)
+
+ def delete(url, token, parsed, conn, container):
+ conn.request('DELETE', parsed.path + '/' + container, '',
{'X-Auth-Token': token})
return check_response(conn)
- resp = retry(delete)
+ resp = retry(delete, self.name)
resp.read()
self.assertEqual(resp.status, 204)
+        # container may not have been created
+ resp = retry(delete, self.container)
+ resp.read()
+ self.assert_(resp.status in (204, 404))
+
def test_multi_metadata(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, name, value):
@@ -110,7 +124,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.getheader('x-container-meta-two'), '2')
def test_unicode_metadata(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, name, value):
@@ -125,7 +139,7 @@ class TestContainer(unittest.TestCase):
uni_key = u'X-Container-Meta-uni\u0E12'
uni_value = u'uni\u0E12'
- if (web_front_end == 'integral'):
+ if (tf.web_front_end == 'integral'):
resp = retry(post, uni_key, '1')
resp.read()
self.assertEqual(resp.status, 204)
@@ -141,7 +155,7 @@ class TestContainer(unittest.TestCase):
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('X-Container-Meta-uni'),
uni_value.encode('utf-8'))
- if (web_front_end == 'integral'):
+ if (tf.web_front_end == 'integral'):
resp = retry(post, uni_key, uni_value)
resp.read()
self.assertEqual(resp.status, 204)
@@ -152,7 +166,7 @@ class TestContainer(unittest.TestCase):
uni_value.encode('utf-8'))
def test_PUT_metadata(self):
- if skip:
+ if tf.skip:
raise SkipTest
def put(url, token, parsed, conn, name, value):
@@ -209,7 +223,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204)
def test_POST_metadata(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, value):
@@ -249,7 +263,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.getheader('x-container-meta-test'), 'Value')
def test_PUT_bad_metadata(self):
- if skip:
+ if tf.skip:
raise SkipTest
def put(url, token, parsed, conn, name, extra_headers):
@@ -266,7 +280,7 @@ class TestContainer(unittest.TestCase):
name = uuid4().hex
resp = retry(
put, name,
- {'X-Container-Meta-' + ('k' * MAX_META_NAME_LENGTH): 'v'})
+ {'X-Container-Meta-' + ('k' * self.max_meta_name_length): 'v'})
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(delete, name)
@@ -275,7 +289,8 @@ class TestContainer(unittest.TestCase):
name = uuid4().hex
resp = retry(
put, name,
- {'X-Container-Meta-' + ('k' * (MAX_META_NAME_LENGTH + 1)): 'v'})
+ {'X-Container-Meta-' + (
+ 'k' * (self.max_meta_name_length + 1)): 'v'})
resp.read()
self.assertEqual(resp.status, 400)
resp = retry(delete, name)
@@ -285,7 +300,7 @@ class TestContainer(unittest.TestCase):
name = uuid4().hex
resp = retry(
put, name,
- {'X-Container-Meta-Too-Long': 'k' * MAX_META_VALUE_LENGTH})
+ {'X-Container-Meta-Too-Long': 'k' * self.max_meta_value_length})
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(delete, name)
@@ -294,7 +309,8 @@ class TestContainer(unittest.TestCase):
name = uuid4().hex
resp = retry(
put, name,
- {'X-Container-Meta-Too-Long': 'k' * (MAX_META_VALUE_LENGTH + 1)})
+ {'X-Container-Meta-Too-Long': 'k' * (
+ self.max_meta_value_length + 1)})
resp.read()
self.assertEqual(resp.status, 400)
resp = retry(delete, name)
@@ -303,7 +319,7 @@ class TestContainer(unittest.TestCase):
name = uuid4().hex
headers = {}
- for x in xrange(MAX_META_COUNT):
+ for x in xrange(self.max_meta_count):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(put, name, headers)
resp.read()
@@ -313,7 +329,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204)
name = uuid4().hex
headers = {}
- for x in xrange(MAX_META_COUNT + 1):
+ for x in xrange(self.max_meta_count + 1):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(put, name, headers)
resp.read()
@@ -324,16 +340,17 @@ class TestContainer(unittest.TestCase):
name = uuid4().hex
headers = {}
- header_value = 'k' * MAX_META_VALUE_LENGTH
+ header_value = 'k' * self.max_meta_value_length
size = 0
x = 0
- while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
- size += 4 + MAX_META_VALUE_LENGTH
+ while size < (self.max_meta_overall_size - 4
+ - self.max_meta_value_length):
+ size += 4 + self.max_meta_value_length
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
- if MAX_META_OVERALL_SIZE - size > 1:
+ if self.max_meta_overall_size - size > 1:
headers['X-Container-Meta-k'] = \
- 'v' * (MAX_META_OVERALL_SIZE - size - 1)
+ 'v' * (self.max_meta_overall_size - size - 1)
resp = retry(put, name, headers)
resp.read()
self.assertEqual(resp.status, 201)
@@ -342,7 +359,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204)
name = uuid4().hex
headers['X-Container-Meta-k'] = \
- 'v' * (MAX_META_OVERALL_SIZE - size)
+ 'v' * (self.max_meta_overall_size - size)
resp = retry(put, name, headers)
resp.read()
self.assertEqual(resp.status, 400)
@@ -351,7 +368,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 404)
def test_POST_bad_metadata(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, extra_headers):
@@ -362,28 +379,30 @@ class TestContainer(unittest.TestCase):
resp = retry(
post,
- {'X-Container-Meta-' + ('k' * MAX_META_NAME_LENGTH): 'v'})
+ {'X-Container-Meta-' + ('k' * self.max_meta_name_length): 'v'})
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(
post,
- {'X-Container-Meta-' + ('k' * (MAX_META_NAME_LENGTH + 1)): 'v'})
+ {'X-Container-Meta-' + (
+ 'k' * (self.max_meta_name_length + 1)): 'v'})
resp.read()
self.assertEqual(resp.status, 400)
resp = retry(
post,
- {'X-Container-Meta-Too-Long': 'k' * MAX_META_VALUE_LENGTH})
+ {'X-Container-Meta-Too-Long': 'k' * self.max_meta_value_length})
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(
post,
- {'X-Container-Meta-Too-Long': 'k' * (MAX_META_VALUE_LENGTH + 1)})
+ {'X-Container-Meta-Too-Long': 'k' * (
+ self.max_meta_value_length + 1)})
resp.read()
self.assertEqual(resp.status, 400)
def test_POST_bad_metadata2(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, extra_headers):
@@ -393,20 +412,20 @@ class TestContainer(unittest.TestCase):
return check_response(conn)
headers = {}
- for x in xrange(MAX_META_COUNT):
+ for x in xrange(self.max_meta_count):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 204)
headers = {}
- for x in xrange(MAX_META_COUNT + 1):
+ for x in xrange(self.max_meta_count + 1):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 400)
def test_POST_bad_metadata3(self):
- if skip:
+ if tf.skip:
raise SkipTest
def post(url, token, parsed, conn, extra_headers):
@@ -416,27 +435,28 @@ class TestContainer(unittest.TestCase):
return check_response(conn)
headers = {}
- header_value = 'k' * MAX_META_VALUE_LENGTH
+ header_value = 'k' * self.max_meta_value_length
size = 0
x = 0
- while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
- size += 4 + MAX_META_VALUE_LENGTH
+ while size < (self.max_meta_overall_size - 4
+ - self.max_meta_value_length):
+ size += 4 + self.max_meta_value_length
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
- if MAX_META_OVERALL_SIZE - size > 1:
+ if self.max_meta_overall_size - size > 1:
headers['X-Container-Meta-k'] = \
- 'v' * (MAX_META_OVERALL_SIZE - size - 1)
+ 'v' * (self.max_meta_overall_size - size - 1)
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 204)
headers['X-Container-Meta-k'] = \
- 'v' * (MAX_META_OVERALL_SIZE - size)
+ 'v' * (self.max_meta_overall_size - size)
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 400)
def test_public_container(self):
- if skip:
+ if tf.skip:
raise SkipTest
def get(url, token, parsed, conn):
@@ -477,7 +497,7 @@ class TestContainer(unittest.TestCase):
self.assert_(str(err).startswith('No result after '), err)
def test_cross_account_container(self):
- if skip or skip2:
+ if tf.skip or tf.skip2:
raise SkipTest
# Obtain the first account's string
first_account = ['unknown']
@@ -505,8 +525,8 @@ class TestContainer(unittest.TestCase):
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
- 'X-Container-Read': swift_test_perm[1],
- 'X-Container-Write': swift_test_perm[1]})
+ 'X-Container-Read': tf.swift_test_perm[1],
+ 'X-Container-Write': tf.swift_test_perm[1]})
return check_response(conn)
resp = retry(post)
@@ -533,7 +553,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 403)
def test_cross_account_public_container(self):
- if skip or skip2:
+ if tf.skip or tf.skip2:
raise SkipTest
# Obtain the first account's string
first_account = ['unknown']
@@ -586,7 +606,7 @@ class TestContainer(unittest.TestCase):
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
- 'X-Container-Write': swift_test_perm[1]})
+ 'X-Container-Write': tf.swift_test_perm[1]})
return check_response(conn)
resp = retry(post)
@@ -602,7 +622,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 201)
def test_nonadmin_user(self):
- if skip or skip3:
+ if tf.skip or tf.skip3:
raise SkipTest
# Obtain the first account's string
first_account = ['unknown']
@@ -630,7 +650,7 @@ class TestContainer(unittest.TestCase):
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
- 'X-Container-Read': swift_test_perm[2]})
+ 'X-Container-Read': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
@@ -655,7 +675,7 @@ class TestContainer(unittest.TestCase):
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
- 'X-Container-Write': swift_test_perm[2]})
+ 'X-Container-Write': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
@@ -672,7 +692,7 @@ class TestContainer(unittest.TestCase):
@requires_acls
def test_read_only_acl_listings(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn):
@@ -695,7 +715,7 @@ class TestContainer(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant read-only access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -725,7 +745,7 @@ class TestContainer(unittest.TestCase):
@requires_acls
def test_read_only_acl_metadata(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn, name):
@@ -760,7 +780,7 @@ class TestContainer(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant read-only access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -782,7 +802,7 @@ class TestContainer(unittest.TestCase):
@requires_acls
def test_read_write_acl_listings(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn):
@@ -810,7 +830,7 @@ class TestContainer(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant read-write access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post, headers=headers, use_account=1)
@@ -853,7 +873,7 @@ class TestContainer(unittest.TestCase):
@requires_acls
def test_read_write_acl_metadata(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn, name):
@@ -888,7 +908,7 @@ class TestContainer(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant read-write access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -924,7 +944,7 @@ class TestContainer(unittest.TestCase):
@requires_acls
def test_admin_acl_listing(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn):
@@ -952,7 +972,7 @@ class TestContainer(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant admin access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post, headers=headers, use_account=1)
@@ -995,7 +1015,7 @@ class TestContainer(unittest.TestCase):
@requires_acls
def test_admin_acl_metadata(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn, name):
@@ -1030,7 +1050,7 @@ class TestContainer(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -1066,7 +1086,7 @@ class TestContainer(unittest.TestCase):
@requires_acls
def test_protected_container_sync(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn, name):
@@ -1100,7 +1120,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# grant read-only access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -1122,7 +1142,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 403)
# grant read-write access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -1160,7 +1180,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret')
# grant admin access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -1188,7 +1208,7 @@ class TestContainer(unittest.TestCase):
@requires_acls
def test_protected_container_acl(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get(url, token, parsed, conn, name):
@@ -1224,7 +1244,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# grant read-only access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -1250,7 +1270,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 403)
# grant read-write access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -1292,7 +1312,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.getheader('X-Container-Write'), 'jdoe')
# grant admin access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -1322,7 +1342,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.getheader('X-Container-Read'), '.r:*')
def test_long_name_content_type(self):
- if skip:
+ if tf.skip:
raise SkipTest
def put(url, token, parsed, conn):
@@ -1338,7 +1358,7 @@ class TestContainer(unittest.TestCase):
'text/html; charset=UTF-8')
def test_null_name(self):
- if skip:
+ if tf.skip:
raise SkipTest
def put(url, token, parsed, conn):
@@ -1347,12 +1367,343 @@ class TestContainer(unittest.TestCase):
return check_response(conn)
resp = retry(put)
- if (web_front_end == 'apache2'):
+ if (tf.web_front_end == 'apache2'):
self.assertEqual(resp.status, 404)
else:
self.assertEqual(resp.read(), 'Invalid UTF8 or contains NULL')
self.assertEqual(resp.status, 412)
+ def test_create_container_gets_default_policy_by_default(self):
+ try:
+ default_policy = \
+ tf.FunctionalStoragePolicyCollection.from_info().default
+ except AssertionError:
+ raise SkipTest()
+
+ def put(url, token, parsed, conn):
+ conn.request('PUT', parsed.path + '/' + self.container, '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+ resp = retry(put)
+ resp.read()
+ self.assertEqual(resp.status // 100, 2)
+
+ def head(url, token, parsed, conn):
+ conn.request('HEAD', parsed.path + '/' + self.container, '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+ resp = retry(head)
+ resp.read()
+ headers = dict((k.lower(), v) for k, v in resp.getheaders())
+ self.assertEquals(headers.get('x-storage-policy'),
+ default_policy['name'])
+
+ def test_error_invalid_storage_policy_name(self):
+ def put(url, token, parsed, conn, headers):
+ new_headers = dict({'X-Auth-Token': token}, **headers)
+ conn.request('PUT', parsed.path + '/' + self.container, '',
+ new_headers)
+ return check_response(conn)
+
+ # create
+ resp = retry(put, {'X-Storage-Policy': uuid4().hex})
+ resp.read()
+ self.assertEqual(resp.status, 400)
+
+ @requires_policies
+ def test_create_non_default_storage_policy_container(self):
+ policy = self.policies.exclude(default=True).select()
+
+ def put(url, token, parsed, conn, headers=None):
+ base_headers = {'X-Auth-Token': token}
+ if headers:
+ base_headers.update(headers)
+ conn.request('PUT', parsed.path + '/' + self.container, '',
+ base_headers)
+ return check_response(conn)
+ headers = {'X-Storage-Policy': policy['name']}
+ resp = retry(put, headers=headers)
+ resp.read()
+ self.assertEqual(resp.status, 201)
+
+ def head(url, token, parsed, conn):
+ conn.request('HEAD', parsed.path + '/' + self.container, '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+ resp = retry(head)
+ resp.read()
+ headers = dict((k.lower(), v) for k, v in resp.getheaders())
+ self.assertEquals(headers.get('x-storage-policy'),
+ policy['name'])
+
+        # and test recreate without specifying Storage Policy
+ resp = retry(put)
+ resp.read()
+ self.assertEqual(resp.status, 202)
+ # should still be original storage policy
+ resp = retry(head)
+ resp.read()
+ headers = dict((k.lower(), v) for k, v in resp.getheaders())
+ self.assertEquals(headers.get('x-storage-policy'),
+ policy['name'])
+
+ # delete it
+ def delete(url, token, parsed, conn):
+ conn.request('DELETE', parsed.path + '/' + self.container, '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+ resp = retry(delete)
+ resp.read()
+ self.assertEqual(resp.status, 204)
+
+ # verify no policy header
+ resp = retry(head)
+ resp.read()
+ headers = dict((k.lower(), v) for k, v in resp.getheaders())
+ self.assertEquals(headers.get('x-storage-policy'), None)
+
+ @requires_policies
+ def test_conflict_change_storage_policy_with_put(self):
+ def put(url, token, parsed, conn, headers):
+ new_headers = dict({'X-Auth-Token': token}, **headers)
+ conn.request('PUT', parsed.path + '/' + self.container, '',
+ new_headers)
+ return check_response(conn)
+
+ # create
+ policy = self.policies.select()
+ resp = retry(put, {'X-Storage-Policy': policy['name']})
+ resp.read()
+ self.assertEqual(resp.status, 201)
+
+ # can't change it
+ other_policy = self.policies.exclude(name=policy['name']).select()
+ resp = retry(put, {'X-Storage-Policy': other_policy['name']})
+ resp.read()
+ self.assertEqual(resp.status, 409)
+
+ def head(url, token, parsed, conn):
+ conn.request('HEAD', parsed.path + '/' + self.container, '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+ # still original policy
+ resp = retry(head)
+ resp.read()
+ headers = dict((k.lower(), v) for k, v in resp.getheaders())
+ self.assertEquals(headers.get('x-storage-policy'),
+ policy['name'])
+
+ @requires_policies
+ def test_noop_change_storage_policy_with_post(self):
+ def put(url, token, parsed, conn, headers):
+ new_headers = dict({'X-Auth-Token': token}, **headers)
+ conn.request('PUT', parsed.path + '/' + self.container, '',
+ new_headers)
+ return check_response(conn)
+
+ # create
+ policy = self.policies.select()
+ resp = retry(put, {'X-Storage-Policy': policy['name']})
+ resp.read()
+ self.assertEqual(resp.status, 201)
+
+ def post(url, token, parsed, conn, headers):
+ new_headers = dict({'X-Auth-Token': token}, **headers)
+ conn.request('POST', parsed.path + '/' + self.container, '',
+ new_headers)
+ return check_response(conn)
+ # attempt update
+ for header in ('X-Storage-Policy', 'X-Storage-Policy-Index'):
+ other_policy = self.policies.exclude(name=policy['name']).select()
+ resp = retry(post, {header: other_policy['name']})
+ resp.read()
+ self.assertEqual(resp.status, 204)
+
+ def head(url, token, parsed, conn):
+ conn.request('HEAD', parsed.path + '/' + self.container, '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+ # still original policy
+ resp = retry(head)
+ resp.read()
+ headers = dict((k.lower(), v) for k, v in resp.getheaders())
+ self.assertEquals(headers.get('x-storage-policy'),
+ policy['name'])
+
+
+class BaseTestContainerACLs(unittest.TestCase):
+ # subclasses can change the account in which container
+ # is created/deleted by setUp/tearDown
+ account = 1
+
+ def _get_account(self, url, token, parsed, conn):
+ return parsed.path
+
+ def _get_tenant_id(self, url, token, parsed, conn):
+ account = parsed.path
+ return account.replace('/v1/AUTH_', '', 1)
+
+ def setUp(self):
+ if tf.skip or tf.skip2 or tf.skip_if_not_v3:
+ raise SkipTest('AUTH VERSION 3 SPECIFIC TEST')
+ self.name = uuid4().hex
+
+ def put(url, token, parsed, conn):
+ conn.request('PUT', parsed.path + '/' + self.name, '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+
+ resp = retry(put, use_account=self.account)
+ resp.read()
+ self.assertEqual(resp.status, 201)
+
+ def tearDown(self):
+ if tf.skip or tf.skip2 or tf.skip_if_not_v3:
+ raise SkipTest
+
+ def get(url, token, parsed, conn):
+ conn.request('GET', parsed.path + '/' + self.name + '?format=json',
+ '', {'X-Auth-Token': token})
+ return check_response(conn)
+
+ def delete(url, token, parsed, conn, obj):
+ conn.request('DELETE',
+ '/'.join([parsed.path, self.name, obj['name']]), '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+
+ while True:
+ resp = retry(get, use_account=self.account)
+ body = resp.read()
+ self.assert_(resp.status // 100 == 2, resp.status)
+ objs = json.loads(body)
+ if not objs:
+ break
+ for obj in objs:
+ resp = retry(delete, obj, use_account=self.account)
+ resp.read()
+ self.assertEqual(resp.status, 204)
+
+ def delete(url, token, parsed, conn):
+ conn.request('DELETE', parsed.path + '/' + self.name, '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+
+ resp = retry(delete, use_account=self.account)
+ resp.read()
+ self.assertEqual(resp.status, 204)
+
+ def _assert_cross_account_acl_granted(self, granted, grantee_account, acl):
+ '''
+ Check whether a given container ACL is granted when a user specified
+        by grantee_account attempts to access a container.
+ '''
+ # Obtain the first account's string
+ first_account = retry(self._get_account, use_account=self.account)
+
+ # Ensure we can't access the container with the grantee account
+ def get2(url, token, parsed, conn):
+ conn.request('GET', first_account + '/' + self.name, '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+
+ resp = retry(get2, use_account=grantee_account)
+ resp.read()
+ self.assertEqual(resp.status, 403)
+
+ def put2(url, token, parsed, conn):
+ conn.request('PUT', first_account + '/' + self.name + '/object',
+ 'test object', {'X-Auth-Token': token})
+ return check_response(conn)
+
+ resp = retry(put2, use_account=grantee_account)
+ resp.read()
+ self.assertEqual(resp.status, 403)
+
+ # Post ACL to the container
+ def post(url, token, parsed, conn):
+ conn.request('POST', parsed.path + '/' + self.name, '',
+ {'X-Auth-Token': token,
+ 'X-Container-Read': acl,
+ 'X-Container-Write': acl})
+ return check_response(conn)
+
+ resp = retry(post, use_account=self.account)
+ resp.read()
+ self.assertEqual(resp.status, 204)
+
+ # Check access to container from grantee account with ACL in place
+ resp = retry(get2, use_account=grantee_account)
+ resp.read()
+ expected = 204 if granted else 403
+ self.assertEqual(resp.status, expected)
+
+ resp = retry(put2, use_account=grantee_account)
+ resp.read()
+ expected = 201 if granted else 403
+ self.assertEqual(resp.status, expected)
+
+ # Make the container private again
+ def post(url, token, parsed, conn):
+ conn.request('POST', parsed.path + '/' + self.name, '',
+ {'X-Auth-Token': token, 'X-Container-Read': '',
+ 'X-Container-Write': ''})
+ return check_response(conn)
+
+ resp = retry(post, use_account=self.account)
+ resp.read()
+ self.assertEqual(resp.status, 204)
+
+ # Ensure we can't access the container with the grantee account again
+ resp = retry(get2, use_account=grantee_account)
+ resp.read()
+ self.assertEqual(resp.status, 403)
+
+ resp = retry(put2, use_account=grantee_account)
+ resp.read()
+ self.assertEqual(resp.status, 403)
+
+
+class TestContainerACLsAccount1(BaseTestContainerACLs):
+ def test_cross_account_acl_names_with_user_in_non_default_domain(self):
+ # names in acls are disallowed when grantee is in a non-default domain
+ acl = '%s:%s' % (tf.swift_test_tenant[3], tf.swift_test_user[3])
+ self._assert_cross_account_acl_granted(False, 4, acl)
+
+ def test_cross_account_acl_ids_with_user_in_non_default_domain(self):
+ # ids are allowed in acls when grantee is in a non-default domain
+ tenant_id = retry(self._get_tenant_id, use_account=4)
+ acl = '%s:%s' % (tenant_id, '*')
+ self._assert_cross_account_acl_granted(True, 4, acl)
+
+ def test_cross_account_acl_names_in_default_domain(self):
+ # names are allowed in acls when grantee and project are in
+ # the default domain
+ acl = '%s:%s' % (tf.swift_test_tenant[1], tf.swift_test_user[1])
+ self._assert_cross_account_acl_granted(True, 2, acl)
+
+ def test_cross_account_acl_ids_in_default_domain(self):
+ # ids are allowed in acls when grantee and project are in
+ # the default domain
+ tenant_id = retry(self._get_tenant_id, use_account=2)
+ acl = '%s:%s' % (tenant_id, '*')
+ self._assert_cross_account_acl_granted(True, 2, acl)
+
+
+class TestContainerACLsAccount4(BaseTestContainerACLs):
+ account = 4
+
+ def test_cross_account_acl_names_with_project_in_non_default_domain(self):
+ # names in acls are disallowed when project is in a non-default domain
+ acl = '%s:%s' % (tf.swift_test_tenant[0], tf.swift_test_user[0])
+ self._assert_cross_account_acl_granted(False, 1, acl)
+
+ def test_cross_account_acl_ids_with_project_in_non_default_domain(self):
+ # ids are allowed in acls when project is in a non-default domain
+ tenant_id = retry(self._get_tenant_id, use_account=1)
+ acl = '%s:%s' % (tenant_id, '*')
+ self._assert_cross_account_acl_granted(True, 1, acl)
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/functional/test_object.py b/test/functional/test_object.py
index 675de30..e74a7f6 100755
--- a/test/functional/test_object.py
+++ b/test/functional/test_object.py
@@ -21,24 +21,22 @@ from uuid import uuid4
from swift.common.utils import json
-from swift_testing import check_response, retry, skip, skip3, \
- swift_test_perm, web_front_end, requires_acls, swift_test_user
+from test.functional import check_response, retry, requires_acls, \
+ requires_policies
+import test.functional as tf
class TestObject(unittest.TestCase):
def setUp(self):
- if skip:
+ if tf.skip:
raise SkipTest
self.container = uuid4().hex
- def put(url, token, parsed, conn):
- conn.request('PUT', parsed.path + '/' + self.container, '',
- {'X-Auth-Token': token})
- return check_response(conn)
- resp = retry(put)
- resp.read()
- self.assertEqual(resp.status, 201)
+ self.containers = []
+ self._create_container(self.container)
+ self._create_container(self.container, use_account=2)
+
self.obj = uuid4().hex
def put(url, token, parsed, conn):
@@ -50,40 +48,65 @@ class TestObject(unittest.TestCase):
resp.read()
self.assertEqual(resp.status, 201)
+ def _create_container(self, name=None, headers=None, use_account=1):
+ if not name:
+ name = uuid4().hex
+ self.containers.append(name)
+ headers = headers or {}
+
+ def put(url, token, parsed, conn, name):
+ new_headers = dict({'X-Auth-Token': token}, **headers)
+ conn.request('PUT', parsed.path + '/' + name, '',
+ new_headers)
+ return check_response(conn)
+ resp = retry(put, name, use_account=use_account)
+ resp.read()
+ self.assertEqual(resp.status, 201)
+ return name
+
def tearDown(self):
- if skip:
+ if tf.skip:
raise SkipTest
- def delete(url, token, parsed, conn, obj):
- conn.request('DELETE',
- '%s/%s/%s' % (parsed.path, self.container, obj),
- '', {'X-Auth-Token': token})
+ # get list of objects in container
+ def get(url, token, parsed, conn, container):
+ conn.request(
+ 'GET', parsed.path + '/' + container + '?format=json', '',
+ {'X-Auth-Token': token})
return check_response(conn)
- # get list of objects in container
- def list(url, token, parsed, conn):
- conn.request('GET',
- '%s/%s' % (parsed.path, self.container),
- '', {'X-Auth-Token': token})
+ # delete an object
+ def delete(url, token, parsed, conn, container, obj):
+ conn.request(
+ 'DELETE', '/'.join([parsed.path, container, obj['name']]), '',
+ {'X-Auth-Token': token})
return check_response(conn)
- resp = retry(list)
- object_listing = resp.read()
- self.assertEqual(resp.status, 200)
- # iterate over object listing and delete all objects
- for obj in object_listing.splitlines():
- resp = retry(delete, obj)
- resp.read()
- self.assertEqual(resp.status, 204)
+ for container in self.containers:
+ while True:
+ resp = retry(get, container)
+ body = resp.read()
+ if resp.status == 404:
+ break
+ self.assert_(resp.status // 100 == 2, resp.status)
+ objs = json.loads(body)
+ if not objs:
+ break
+ for obj in objs:
+ resp = retry(delete, container, obj)
+ resp.read()
+ self.assertEqual(resp.status, 204)
# delete the container
- def delete(url, token, parsed, conn):
- conn.request('DELETE', parsed.path + '/' + self.container, '',
+ def delete(url, token, parsed, conn, name):
+ conn.request('DELETE', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
- resp = retry(delete)
- resp.read()
- self.assertEqual(resp.status, 204)
+
+ for container in self.containers:
+ resp = retry(delete, container)
+ resp.read()
+ self.assert_(resp.status in (204, 404))
def test_if_none_match(self):
def put(url, token, parsed, conn):
@@ -111,8 +134,47 @@ class TestObject(unittest.TestCase):
resp.read()
self.assertEquals(resp.status, 400)
+ def test_non_integer_x_delete_after(self):
+ def put(url, token, parsed, conn):
+ conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
+ 'non_integer_x_delete_after'),
+ '', {'X-Auth-Token': token,
+ 'Content-Length': '0',
+ 'X-Delete-After': '*'})
+ return check_response(conn)
+ resp = retry(put)
+ body = resp.read()
+ self.assertEquals(resp.status, 400)
+ self.assertEqual(body, 'Non-integer X-Delete-After')
+
+ def test_non_integer_x_delete_at(self):
+ def put(url, token, parsed, conn):
+ conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
+ 'non_integer_x_delete_at'),
+ '', {'X-Auth-Token': token,
+ 'Content-Length': '0',
+ 'X-Delete-At': '*'})
+ return check_response(conn)
+ resp = retry(put)
+ body = resp.read()
+ self.assertEquals(resp.status, 400)
+ self.assertEqual(body, 'Non-integer X-Delete-At')
+
+ def test_x_delete_at_in_the_past(self):
+ def put(url, token, parsed, conn):
+ conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
+ 'x_delete_at_in_the_past'),
+ '', {'X-Auth-Token': token,
+ 'Content-Length': '0',
+ 'X-Delete-At': '0'})
+ return check_response(conn)
+ resp = retry(put)
+ body = resp.read()
+ self.assertEquals(resp.status, 400)
+ self.assertEqual(body, 'X-Delete-At in past')
+
def test_copy_object(self):
- if skip:
+ if tf.skip:
raise SkipTest
source = '%s/%s' % (self.container, self.obj)
@@ -185,8 +247,118 @@ class TestObject(unittest.TestCase):
resp.read()
self.assertEqual(resp.status, 204)
+ def test_copy_between_accounts(self):
+ if tf.skip:
+ raise SkipTest
+
+ source = '%s/%s' % (self.container, self.obj)
+ dest = '%s/%s' % (self.container, 'test_copy')
+
+ # get contents of source
+ def get_source(url, token, parsed, conn):
+ conn.request('GET',
+ '%s/%s' % (parsed.path, source),
+ '', {'X-Auth-Token': token})
+ return check_response(conn)
+ resp = retry(get_source)
+ source_contents = resp.read()
+ self.assertEqual(resp.status, 200)
+ self.assertEqual(source_contents, 'test')
+
+ acct = tf.parsed[0].path.split('/', 2)[2]
+
+ # copy source to dest with X-Copy-From-Account
+ def put(url, token, parsed, conn):
+ conn.request('PUT', '%s/%s' % (parsed.path, dest), '',
+ {'X-Auth-Token': token,
+ 'Content-Length': '0',
+ 'X-Copy-From-Account': acct,
+ 'X-Copy-From': source})
+ return check_response(conn)
+ # try to put, will not succeed
+ # user does not have permissions to read from source
+ resp = retry(put, use_account=2)
+ self.assertEqual(resp.status, 403)
+
+ # add acl to allow reading from source
+ def post(url, token, parsed, conn):
+ conn.request('POST', '%s/%s' % (parsed.path, self.container), '',
+ {'X-Auth-Token': token,
+ 'X-Container-Read': tf.swift_test_perm[1]})
+ return check_response(conn)
+ resp = retry(post)
+ self.assertEqual(resp.status, 204)
+
+ # retry previous put, now should succeed
+ resp = retry(put, use_account=2)
+ self.assertEqual(resp.status, 201)
+
+ # contents of dest should be the same as source
+ def get_dest(url, token, parsed, conn):
+ conn.request('GET',
+ '%s/%s' % (parsed.path, dest),
+ '', {'X-Auth-Token': token})
+ return check_response(conn)
+ resp = retry(get_dest, use_account=2)
+ dest_contents = resp.read()
+ self.assertEqual(resp.status, 200)
+ self.assertEqual(dest_contents, source_contents)
+
+ # delete the copy
+ def delete(url, token, parsed, conn):
+ conn.request('DELETE', '%s/%s' % (parsed.path, dest), '',
+ {'X-Auth-Token': token})
+ return check_response(conn)
+ resp = retry(delete, use_account=2)
+ resp.read()
+ self.assertEqual(resp.status, 204)
+ # verify dest does not exist
+ resp = retry(get_dest, use_account=2)
+ resp.read()
+ self.assertEqual(resp.status, 404)
+
+ acct_dest = tf.parsed[1].path.split('/', 2)[2]
+
+ # copy source to dest with COPY
+ def copy(url, token, parsed, conn):
+ conn.request('COPY', '%s/%s' % (parsed.path, source), '',
+ {'X-Auth-Token': token,
+ 'Destination-Account': acct_dest,
+ 'Destination': dest})
+ return check_response(conn)
+ # try to copy, will not succeed
+ # user does not have permissions to write to destination
+ resp = retry(copy)
+ resp.read()
+ self.assertEqual(resp.status, 403)
+
+ # add acl to allow write to destination
+ def post(url, token, parsed, conn):
+ conn.request('POST', '%s/%s' % (parsed.path, self.container), '',
+ {'X-Auth-Token': token,
+ 'X-Container-Write': tf.swift_test_perm[0]})
+ return check_response(conn)
+ resp = retry(post, use_account=2)
+ self.assertEqual(resp.status, 204)
+
+ # now copy will succeed
+ resp = retry(copy)
+ resp.read()
+ self.assertEqual(resp.status, 201)
+
+ # contents of dest should be the same as source
+ resp = retry(get_dest, use_account=2)
+ dest_contents = resp.read()
+ self.assertEqual(resp.status, 200)
+ self.assertEqual(dest_contents, source_contents)
+
+ # delete the copy
+ resp = retry(delete, use_account=2)
+ resp.read()
+ self.assertEqual(resp.status, 204)
+
def test_public_object(self):
- if skip:
+ if tf.skip:
raise SkipTest
def get(url, token, parsed, conn):
@@ -225,7 +397,7 @@ class TestObject(unittest.TestCase):
self.assert_(str(err).startswith('No result after '))
def test_private_object(self):
- if skip or skip3:
+ if tf.skip or tf.skip3:
raise SkipTest
# Ensure we can't access the object with the third account
@@ -245,8 +417,8 @@ class TestObject(unittest.TestCase):
conn.request('PUT', '%s/%s' % (
parsed.path, shared_container), '',
{'X-Auth-Token': token,
- 'X-Container-Read': swift_test_perm[2],
- 'X-Container-Write': swift_test_perm[2]})
+ 'X-Container-Read': tf.swift_test_perm[2],
+ 'X-Container-Write': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(put)
resp.read()
@@ -319,8 +491,8 @@ class TestObject(unittest.TestCase):
@requires_acls
def test_read_only(self):
- if skip3:
- raise SkipTest
+ if tf.skip3:
+            raise SkipTest
def get_listing(url, token, parsed, conn):
conn.request('GET', '%s/%s' % (parsed.path, self.container), '',
@@ -361,7 +533,7 @@ class TestObject(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant read-only access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -400,7 +572,7 @@ class TestObject(unittest.TestCase):
@requires_acls
def test_read_write(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get_listing(url, token, parsed, conn):
@@ -442,7 +614,7 @@ class TestObject(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant read-write access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -481,7 +653,7 @@ class TestObject(unittest.TestCase):
@requires_acls
def test_admin(self):
- if skip3:
+ if tf.skip3:
raise SkipTest
def get_listing(url, token, parsed, conn):
@@ -523,7 +695,7 @@ class TestObject(unittest.TestCase):
self.assertEquals(resp.status, 403)
# grant admin access
- acl_user = swift_test_user[2]
+ acl_user = tf.swift_test_user[2]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
@@ -561,7 +733,7 @@ class TestObject(unittest.TestCase):
self.assert_(self.obj not in listing)
def test_manifest(self):
- if skip:
+ if tf.skip:
raise SkipTest
# Data for the object segments
segments1 = ['one', 'two', 'three', 'four', 'five']
@@ -672,7 +844,7 @@ class TestObject(unittest.TestCase):
self.assertEqual(resp.read(), ''.join(segments2))
self.assertEqual(resp.status, 200)
- if not skip3:
+ if not tf.skip3:
# Ensure we can't access the manifest with the third account
def get(url, token, parsed, conn):
@@ -687,7 +859,7 @@ class TestObject(unittest.TestCase):
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s' % (parsed.path, self.container),
'', {'X-Auth-Token': token,
- 'X-Container-Read': swift_test_perm[2]})
+ 'X-Container-Read': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
resp.read()
@@ -745,7 +917,7 @@ class TestObject(unittest.TestCase):
self.assertEqual(resp.read(), ''.join(segments3))
self.assertEqual(resp.status, 200)
- if not skip3:
+ if not tf.skip3:
# Ensure we can't access the manifest with the third account
# (because the segments are in a protected container even if the
@@ -763,7 +935,7 @@ class TestObject(unittest.TestCase):
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s' % (parsed.path, acontainer),
'', {'X-Auth-Token': token,
- 'X-Container-Read': swift_test_perm[2]})
+ 'X-Container-Read': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
resp.read()
@@ -831,7 +1003,7 @@ class TestObject(unittest.TestCase):
self.assertEqual(resp.status, 204)
def test_delete_content_type(self):
- if skip:
+ if tf.skip:
raise SkipTest
def put(url, token, parsed, conn):
@@ -853,7 +1025,7 @@ class TestObject(unittest.TestCase):
'text/html; charset=UTF-8')
def test_delete_if_delete_at_bad(self):
- if skip:
+ if tf.skip:
raise SkipTest
def put(url, token, parsed, conn):
@@ -875,7 +1047,7 @@ class TestObject(unittest.TestCase):
self.assertEqual(resp.status, 400)
def test_null_name(self):
- if skip:
+ if tf.skip:
raise SkipTest
def put(url, token, parsed, conn):
@@ -884,23 +1056,20 @@ class TestObject(unittest.TestCase):
self.container), 'test', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
- if (web_front_end == 'apache2'):
+ if (tf.web_front_end == 'apache2'):
self.assertEqual(resp.status, 404)
else:
self.assertEqual(resp.read(), 'Invalid UTF8 or contains NULL')
self.assertEqual(resp.status, 412)
def test_cors(self):
- if skip:
+ if tf.skip:
raise SkipTest
- def is_strict_mode(url, token, parsed, conn):
- conn.request('GET', '/info')
- resp = conn.getresponse()
- if resp.status // 100 == 2:
- info = json.loads(resp.read())
- return info.get('swift', {}).get('strict_cors_mode', False)
- return False
+ try:
+ strict_cors = tf.cluster_info['swift']['strict_cors_mode']
+ except KeyError:
+ raise SkipTest("cors mode is unknown")
def put_cors_cont(url, token, parsed, conn, orig):
conn.request(
@@ -924,8 +1093,6 @@ class TestObject(unittest.TestCase):
'', headers)
return conn.getresponse()
- strict_cors = retry(is_strict_mode)
-
resp = retry(put_cors_cont, '*')
resp.read()
self.assertEquals(resp.status // 100, 2)
@@ -1001,6 +1168,64 @@ class TestObject(unittest.TestCase):
self.assertEquals(headers.get('access-control-allow-origin'),
'http://m.com')
+ @requires_policies
+ def test_cross_policy_copy(self):
+ # create container in first policy
+ policy = self.policies.select()
+ container = self._create_container(
+ headers={'X-Storage-Policy': policy['name']})
+ obj = uuid4().hex
+
+ # create a container in second policy
+ other_policy = self.policies.exclude(name=policy['name']).select()
+ other_container = self._create_container(
+ headers={'X-Storage-Policy': other_policy['name']})
+ other_obj = uuid4().hex
+
+ def put_obj(url, token, parsed, conn, container, obj):
+ # to keep track of things, use the original path as the body
+ content = '%s/%s' % (container, obj)
+ path = '%s/%s' % (parsed.path, content)
+ conn.request('PUT', path, content, {'X-Auth-Token': token})
+ return check_response(conn)
+
+ # create objects
+ for c, o in zip((container, other_container), (obj, other_obj)):
+ resp = retry(put_obj, c, o)
+ resp.read()
+ self.assertEqual(resp.status, 201)
+
+ def put_copy_from(url, token, parsed, conn, container, obj, source):
+ dest_path = '%s/%s/%s' % (parsed.path, container, obj)
+ conn.request('PUT', dest_path, '',
+ {'X-Auth-Token': token,
+ 'Content-Length': '0',
+ 'X-Copy-From': source})
+ return check_response(conn)
+
+ copy_requests = (
+ (container, other_obj, '%s/%s' % (other_container, other_obj)),
+ (other_container, obj, '%s/%s' % (container, obj)),
+ )
+
+ # copy objects
+ for c, o, source in copy_requests:
+ resp = retry(put_copy_from, c, o, source)
+ resp.read()
+ self.assertEqual(resp.status, 201)
+
+ def get_obj(url, token, parsed, conn, container, obj):
+ path = '%s/%s/%s' % (parsed.path, container, obj)
+ conn.request('GET', path, '', {'X-Auth-Token': token})
+ return check_response(conn)
+
+ # validate contents, contents should be source
+ validate_requests = copy_requests
+ for c, o, body in validate_requests:
+ resp = retry(get_obj, c, o)
+ self.assertEqual(resp.status, 200)
+ self.assertEqual(body, resp.read())
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/functional/tests.py b/test/functional/tests.py
index b8633b0..daa8897 100644
--- a/test/functional/tests.py
+++ b/test/functional/tests.py
@@ -14,10 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Modifications by Red Hat, Inc.
-
from datetime import datetime
-import os
import hashlib
import hmac
import json
@@ -25,72 +22,25 @@ import locale
import random
import StringIO
import time
-import threading
+import os
import unittest
import urllib
import uuid
+from copy import deepcopy
+import eventlet
from nose import SkipTest
-from ConfigParser import ConfigParser
+from swift.common.http import is_success, is_client_error
-from test import get_config
+from test.functional import normalized_urls, load_constraint, cluster_info
+from test.functional import check_response, retry
+import test.functional as tf
from test.functional.swift_test_client import Account, Connection, File, \
ResponseError
-from swift.common.constraints import MAX_FILE_SIZE, MAX_META_NAME_LENGTH, \
- MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \
- MAX_OBJECT_NAME_LENGTH, CONTAINER_LISTING_LIMIT, ACCOUNT_LISTING_LIMIT, \
- MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH, MAX_HEADER_SIZE
from gluster.swift.common.constraints import \
set_object_name_component_length, get_object_name_component_length
-default_constraints = dict((
- ('max_file_size', MAX_FILE_SIZE),
- ('max_meta_name_length', MAX_META_NAME_LENGTH),
- ('max_meta_value_length', MAX_META_VALUE_LENGTH),
- ('max_meta_count', MAX_META_COUNT),
- ('max_meta_overall_size', MAX_META_OVERALL_SIZE),
- ('max_object_name_length', MAX_OBJECT_NAME_LENGTH),
- ('container_listing_limit', CONTAINER_LISTING_LIMIT),
- ('account_listing_limit', ACCOUNT_LISTING_LIMIT),
- ('max_account_name_length', MAX_ACCOUNT_NAME_LENGTH),
- ('max_container_name_length', MAX_CONTAINER_NAME_LENGTH),
- ('max_header_size', MAX_HEADER_SIZE)))
-constraints_conf = ConfigParser()
-conf_exists = constraints_conf.read('/etc/swift/swift.conf')
-# Constraints are set first from the test config, then from
-# /etc/swift/swift.conf if it exists. If swift.conf doesn't exist,
-# then limit test coverage. This allows SAIO tests to work fine but
-# requires remote functional testing to know something about the cluster
-# that is being tested.
-config = get_config('func_test')
-for k in default_constraints:
- if k in config:
- # prefer what's in test.conf
- config[k] = int(config[k])
- elif conf_exists:
- # swift.conf exists, so use what's defined there (or swift defaults)
- # This normally happens when the test is running locally to the cluster
- # as in a SAIO.
- config[k] = default_constraints[k]
- else:
- # .functests don't know what the constraints of the tested cluster are,
- # so the tests can't reliably pass or fail. Therefore, skip those
- # tests.
- config[k] = '%s constraint is not defined' % k
-
-web_front_end = config.get('web_front_end', 'integral')
-normalized_urls = config.get('normalized_urls', False)
set_object_name_component_length()
-
-def load_constraint(name):
- c = config[name]
- if not isinstance(c, int):
- raise SkipTest(c)
- return c
-
-locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))
-
-
def create_limit_filename(name_limit):
"""
Convert a split a large object name with
@@ -116,42 +66,6 @@ def create_limit_filename(name_limit):
return "".join(filename_list)
-def chunks(s, length=3):
- i, j = 0, length
- while i < len(s):
- yield s[i:j]
- i, j = j, j + length
-
-
-def timeout(seconds, method, *args, **kwargs):
- class TimeoutThread(threading.Thread):
- def __init__(self, method, *args, **kwargs):
- threading.Thread.__init__(self)
-
- self.method = method
- self.args = args
- self.kwargs = kwargs
- self.exception = None
-
- def run(self):
- try:
- self.method(*self.args, **self.kwargs)
- except Exception as e:
- self.exception = e
-
- t = TimeoutThread(method, *args, **kwargs)
- t.start()
- t.join(seconds)
-
- if t.exception:
- raise t.exception
-
- if t.isAlive():
- t._Thread__stop()
- return True
- return False
-
-
class Utils(object):
@classmethod
def create_ascii_name(cls, length=None):
@@ -207,10 +121,10 @@ class Base2(object):
class TestAccountEnv(object):
@classmethod
def setUp(cls):
- cls.conn = Connection(config)
+ cls.conn = Connection(tf.config)
cls.conn.authenticate()
- cls.account = Account(cls.conn, config.get('account',
- config['username']))
+ cls.account = Account(cls.conn, tf.config.get('account',
+ tf.config['username']))
cls.account.delete_containers()
cls.containers = []
@@ -386,6 +300,28 @@ class TestAccount(Base):
self.assertEqual(sorted(containers, cmp=locale.strcoll),
containers)
+ def testQuotedWWWAuthenticateHeader(self):
+ # check that the www-authenticate header value with the swift realm
+ # is correctly quoted.
+ conn = Connection(tf.config)
+ conn.authenticate()
+ inserted_html = '<b>Hello World'
+ hax = 'AUTH_haxx"\nContent-Length: %d\n\n%s' % (len(inserted_html),
+ inserted_html)
+ quoted_hax = urllib.quote(hax)
+ conn.connection.request('GET', '/v1/' + quoted_hax, None, {})
+ resp = conn.connection.getresponse()
+ resp_headers = dict(resp.getheaders())
+ self.assertTrue('www-authenticate' in resp_headers,
+ 'www-authenticate not found in %s' % resp_headers)
+ actual = resp_headers['www-authenticate']
+ expected = 'Swift realm="%s"' % quoted_hax
+ # other middleware e.g. auth_token may also set www-authenticate
+ # headers in which case actual values will be a comma separated list.
+ # check that expected value is among the actual values
+ self.assertTrue(expected in actual,
+ '%s not found in %s' % (expected, actual))
+
class TestAccountUTF8(Base2, TestAccount):
set_up = False
@@ -394,10 +330,10 @@ class TestAccountUTF8(Base2, TestAccount):
class TestAccountNoContainersEnv(object):
@classmethod
def setUp(cls):
- cls.conn = Connection(config)
+ cls.conn = Connection(tf.config)
cls.conn.authenticate()
- cls.account = Account(cls.conn, config.get('account',
- config['username']))
+ cls.account = Account(cls.conn, tf.config.get('account',
+ tf.config['username']))
cls.account.delete_containers()
@@ -423,10 +359,10 @@ class TestAccountNoContainersUTF8(Base2, TestAccountNoContainers):
class TestContainerEnv(object):
@classmethod
def setUp(cls):
- cls.conn = Connection(config)
+ cls.conn = Connection(tf.config)
cls.conn.authenticate()
- cls.account = Account(cls.conn, config.get('account',
- config['username']))
+ cls.account = Account(cls.conn, tf.config.get('account',
+ tf.config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
@@ -714,11 +650,11 @@ class TestContainerUTF8(Base2, TestContainer):
class TestContainerPathsEnv(object):
@classmethod
def setUp(cls):
- raise SkipTest('Objects ending in / are not supported')
- cls.conn = Connection(config)
+ raise SkipTest('Objects ending in / are not supported')
+ cls.conn = Connection(tf.config)
cls.conn.authenticate()
- cls.account = Account(cls.conn, config.get('account',
- config['username']))
+ cls.account = Account(cls.conn, tf.config.get('account',
+ tf.config['username']))
cls.account.delete_containers()
cls.file_size = 8
@@ -894,11 +830,24 @@ class TestContainerPaths(Base):
class TestFileEnv(object):
@classmethod
def setUp(cls):
- cls.conn = Connection(config)
+ cls.conn = Connection(tf.config)
cls.conn.authenticate()
- cls.account = Account(cls.conn, config.get('account',
- config['username']))
+ cls.account = Account(cls.conn, tf.config.get('account',
+ tf.config['username']))
+ # creating another account and connection
+ # for account to account copy tests
+ config2 = deepcopy(tf.config)
+ config2['account'] = tf.config['account2']
+ config2['username'] = tf.config['username2']
+ config2['password'] = tf.config['password2']
+ cls.conn2 = Connection(config2)
+ cls.conn2.authenticate()
+
+ cls.account = Account(cls.conn, tf.config.get('account',
+ tf.config['username']))
cls.account.delete_containers()
+ cls.account2 = cls.conn2.get_account()
+ cls.account2.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
@@ -952,6 +901,62 @@ class TestFile(Base):
self.assert_(file_item.initialize())
self.assert_(metadata == file_item.metadata)
+ def testCopyAccount(self):
+ # makes sure to test encoded characters
+ source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
+ file_item = self.env.container.file(source_filename)
+
+ metadata = {Utils.create_ascii_name(): Utils.create_name()}
+
+ data = file_item.write_random()
+ file_item.sync_metadata(metadata)
+
+ dest_cont = self.env.account.container(Utils.create_name())
+ self.assert_(dest_cont.create())
+
+ acct = self.env.conn.account_name
+ # copy both from within and across containers
+ for cont in (self.env.container, dest_cont):
+ # copy both with and without initial slash
+ for prefix in ('', '/'):
+ dest_filename = Utils.create_name()
+
+ file_item = self.env.container.file(source_filename)
+ file_item.copy_account(acct,
+ '%s%s' % (prefix, cont),
+ dest_filename)
+
+ self.assert_(dest_filename in cont.files())
+
+ file_item = cont.file(dest_filename)
+
+ self.assert_(data == file_item.read())
+ self.assert_(file_item.initialize())
+ self.assert_(metadata == file_item.metadata)
+
+ dest_cont = self.env.account2.container(Utils.create_name())
+ self.assert_(dest_cont.create(hdrs={
+ 'X-Container-Write': self.env.conn.user_acl
+ }))
+
+ acct = self.env.conn2.account_name
+ # copy both with and without initial slash
+ for prefix in ('', '/'):
+ dest_filename = Utils.create_name()
+
+ file_item = self.env.container.file(source_filename)
+ file_item.copy_account(acct,
+ '%s%s' % (prefix, dest_cont),
+ dest_filename)
+
+ self.assert_(dest_filename in dest_cont.files())
+
+ file_item = dest_cont.file(dest_filename)
+
+ self.assert_(data == file_item.read())
+ self.assert_(file_item.initialize())
+ self.assert_(metadata == file_item.metadata)
+
def testCopy404s(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
@@ -990,6 +995,77 @@ class TestFile(Base):
'%s%s' % (prefix, Utils.create_name()),
Utils.create_name()))
+ def testCopyAccount404s(self):
+ acct = self.env.conn.account_name
+ acct2 = self.env.conn2.account_name
+ source_filename = Utils.create_name()
+ file_item = self.env.container.file(source_filename)
+ file_item.write_random()
+
+ dest_cont = self.env.account.container(Utils.create_name())
+ self.assert_(dest_cont.create(hdrs={
+ 'X-Container-Read': self.env.conn2.user_acl
+ }))
+ dest_cont2 = self.env.account2.container(Utils.create_name())
+ self.assert_(dest_cont2.create(hdrs={
+ 'X-Container-Write': self.env.conn.user_acl,
+ 'X-Container-Read': self.env.conn.user_acl
+ }))
+
+ for acct, cont in ((acct, dest_cont), (acct2, dest_cont2)):
+ for prefix in ('', '/'):
+ # invalid source container
+ source_cont = self.env.account.container(Utils.create_name())
+ file_item = source_cont.file(source_filename)
+ self.assert_(not file_item.copy_account(
+ acct,
+ '%s%s' % (prefix, self.env.container),
+ Utils.create_name()))
+ if acct == acct2:
+ # there is no such source container
+ # and foreign user can have no permission to read it
+ self.assert_status(403)
+ else:
+ self.assert_status(404)
+
+ self.assert_(not file_item.copy_account(
+ acct,
+ '%s%s' % (prefix, cont),
+ Utils.create_name()))
+ self.assert_status(404)
+
+ # invalid source object
+ file_item = self.env.container.file(Utils.create_name())
+ self.assert_(not file_item.copy_account(
+ acct,
+ '%s%s' % (prefix, self.env.container),
+ Utils.create_name()))
+ if acct == acct2:
+ # there is no such object
+ # and foreign user can have no permission to read it
+ self.assert_status(403)
+ else:
+ self.assert_status(404)
+
+ self.assert_(not file_item.copy_account(
+ acct,
+ '%s%s' % (prefix, cont),
+ Utils.create_name()))
+ self.assert_status(404)
+
+ # invalid destination container
+ file_item = self.env.container.file(source_filename)
+ self.assert_(not file_item.copy_account(
+ acct,
+ '%s%s' % (prefix, Utils.create_name()),
+ Utils.create_name()))
+ if acct == acct2:
+ # there is no such destination container
+ # and foreign user can have no permission to write there
+ self.assert_status(403)
+ else:
+ self.assert_status(404)
+
def testCopyNoDestinationHeader(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
@@ -1044,6 +1120,49 @@ class TestFile(Base):
self.assert_(file_item.initialize())
self.assert_(metadata == file_item.metadata)
+ def testCopyFromAccountHeader(self):
+ acct = self.env.conn.account_name
+ src_cont = self.env.account.container(Utils.create_name())
+ self.assert_(src_cont.create(hdrs={
+ 'X-Container-Read': self.env.conn2.user_acl
+ }))
+ source_filename = Utils.create_name()
+ file_item = src_cont.file(source_filename)
+
+ metadata = {}
+ for i in range(1):
+ metadata[Utils.create_ascii_name()] = Utils.create_name()
+ file_item.metadata = metadata
+
+ data = file_item.write_random()
+
+ dest_cont = self.env.account.container(Utils.create_name())
+ self.assert_(dest_cont.create())
+ dest_cont2 = self.env.account2.container(Utils.create_name())
+ self.assert_(dest_cont2.create(hdrs={
+ 'X-Container-Write': self.env.conn.user_acl
+ }))
+
+ for cont in (src_cont, dest_cont, dest_cont2):
+ # copy both with and without initial slash
+ for prefix in ('', '/'):
+ dest_filename = Utils.create_name()
+
+ file_item = cont.file(dest_filename)
+ file_item.write(hdrs={'X-Copy-From-Account': acct,
+ 'X-Copy-From': '%s%s/%s' % (
+ prefix,
+ src_cont.name,
+ source_filename)})
+
+ self.assert_(dest_filename in cont.files())
+
+ file_item = cont.file(dest_filename)
+
+ self.assert_(data == file_item.read())
+ self.assert_(file_item.initialize())
+ self.assert_(metadata == file_item.metadata)
+
def testCopyFromHeader404s(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
@@ -1075,6 +1194,52 @@ class TestFile(Base):
self.env.container.name, source_filename)})
self.assert_status(404)
+ def testCopyFromAccountHeader404s(self):
+ acct = self.env.conn2.account_name
+ src_cont = self.env.account2.container(Utils.create_name())
+ self.assert_(src_cont.create(hdrs={
+ 'X-Container-Read': self.env.conn.user_acl
+ }))
+ source_filename = Utils.create_name()
+ file_item = src_cont.file(source_filename)
+ file_item.write_random()
+ dest_cont = self.env.account.container(Utils.create_name())
+ self.assert_(dest_cont.create())
+
+ for prefix in ('', '/'):
+ # invalid source container
+ file_item = dest_cont.file(Utils.create_name())
+ self.assertRaises(ResponseError, file_item.write,
+ hdrs={'X-Copy-From-Account': acct,
+ 'X-Copy-From': '%s%s/%s' %
+ (prefix,
+ Utils.create_name(),
+ source_filename)})
+ # looks like cached responses leak "not found"
+ # to un-authorized users, not going to fix it now, but...
+ self.assert_status([403, 404])
+
+ # invalid source object
+ file_item = self.env.container.file(Utils.create_name())
+ self.assertRaises(ResponseError, file_item.write,
+ hdrs={'X-Copy-From-Account': acct,
+ 'X-Copy-From': '%s%s/%s' %
+ (prefix,
+ src_cont,
+ Utils.create_name())})
+ self.assert_status(404)
+
+ # invalid destination container
+ dest_cont = self.env.account.container(Utils.create_name())
+ file_item = dest_cont.file(Utils.create_name())
+ self.assertRaises(ResponseError, file_item.write,
+ hdrs={'X-Copy-From-Account': acct,
+ 'X-Copy-From': '%s%s/%s' %
+ (prefix,
+ src_cont,
+ source_filename)})
+ self.assert_status(404)
+
def testNameLimit(self):
limit = load_constraint('max_object_name_length')
@@ -1191,7 +1356,12 @@ class TestFile(Base):
self.assertEqual(file_types, file_types_read)
def testRangedGets(self):
- file_length = 10000
+ # We set the file_length to a strange multiple here. This is to check
+ # that ranges still work in the EC case when the requested range
+ # spans EC segment boundaries. The 1 MiB base value is chosen because
+ # that's a common EC segment size. The 1.33 multiple is to ensure we
+ # aren't aligned on segment boundaries
+ file_length = int(1048576 * 1.33)
range_size = file_length / 10
file_item = self.env.container.file(Utils.create_name())
data = file_item.write_random(file_length)
@@ -1254,6 +1424,15 @@ class TestFile(Base):
limit = load_constraint('max_file_size')
tsecs = 3
+ def timeout(seconds, method, *args, **kwargs):
+ try:
+ with eventlet.Timeout(seconds):
+ method(*args, **kwargs)
+ except eventlet.Timeout:
+ return True
+ else:
+ return False
+
for i in (limit - 100, limit - 10, limit - 1, limit, limit + 1,
limit + 10, limit + 100):
@@ -1295,6 +1474,16 @@ class TestFile(Base):
cfg={'no_content_length': True})
self.assert_status(400)
+ # no content-length
+ self.assertRaises(ResponseError, file_item.write_random, file_length,
+ cfg={'no_content_length': True})
+ self.assert_status(411)
+
+ self.assertRaises(ResponseError, file_item.write_random, file_length,
+ hdrs={'transfer-encoding': 'gzip,chunked'},
+ cfg={'no_content_length': True})
+ self.assert_status(501)
+
# bad request types
#for req in ('LICK', 'GETorHEAD_base', 'container_info',
# 'best_response'):
@@ -1565,8 +1754,16 @@ class TestFile(Base):
self.assertEqual(etag, header_etag)
def testChunkedPut(self):
- if (web_front_end == 'apache2'):
- raise SkipTest()
+ if (tf.web_front_end == 'apache2'):
+ raise SkipTest("Chunked PUT can only be tested with apache2 web"
+ " front end")
+
+ def chunks(s, length=3):
+ i, j = 0, length
+ while i < len(s):
+ yield s[i:j]
+ i, j = j, j + length
+
data = File.random_data(10000)
etag = File.compute_md5sum(data)
@@ -1590,10 +1787,10 @@ class TestFileUTF8(Base2, TestFile):
class TestDloEnv(object):
@classmethod
def setUp(cls):
- cls.conn = Connection(config)
+ cls.conn = Connection(tf.config)
cls.conn.authenticate()
- cls.account = Account(cls.conn, config.get('account',
- config['username']))
+ cls.account = Account(cls.conn, tf.config.get('account',
+ tf.config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
@@ -1657,6 +1854,9 @@ class TestDlo(Base):
file_item = self.env.container.file('man1')
file_contents = file_item.read(parms={'multipart-manifest': 'get'})
self.assertEqual(file_contents, "man1-contents")
+ self.assertEqual(file_item.info()['x_object_manifest'],
+ "%s/%s/seg_lower" %
+ (self.env.container.name, self.env.segment_prefix))
def test_get_range(self):
file_item = self.env.container.file('man1')
@@ -1691,9 +1891,38 @@ class TestDlo(Base):
self.assertEqual(
file_contents,
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff")
+ # The copied object must not have X-Object-Manifest
+ self.assertTrue("x_object_manifest" not in file_item.info())
+
+ def test_copy_account(self):
+ # dlo use same account and same container only
+ acct = self.env.conn.account_name
+ # Adding a new segment, copying the manifest, and then deleting the
+ # segment proves that the new object is really the concatenated
+ # segments and not just a manifest.
+ f_segment = self.env.container.file("%s/seg_lowerf" %
+ (self.env.segment_prefix))
+ f_segment.write('ffffffffff')
+ try:
+ man1_item = self.env.container.file('man1')
+ man1_item.copy_account(acct,
+ self.env.container.name,
+ "copied-man1")
+ finally:
+ # try not to leave this around for other tests to stumble over
+ f_segment.delete()
+
+ file_item = self.env.container.file('copied-man1')
+ file_contents = file_item.read()
+ self.assertEqual(
+ file_contents,
+ "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff")
+ # The copied object must not have X-Object-Manifest
+ self.assertTrue("x_object_manifest" not in file_item.info())
def test_copy_manifest(self):
- # Copying the manifest should result in another manifest
+ # Copying the manifest with multipart-manifest=get query string
+ # should result in another manifest
try:
man1_item = self.env.container.file('man1')
man1_item.copy(self.env.container.name, "copied-man1",
@@ -1707,10 +1936,57 @@ class TestDlo(Base):
self.assertEqual(
copied_contents,
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee")
+ self.assertEqual(man1_item.info()['x_object_manifest'],
+ copied.info()['x_object_manifest'])
finally:
# try not to leave this around for other tests to stumble over
self.env.container.file("copied-man1").delete()
+ def test_dlo_if_match_get(self):
+ manifest = self.env.container.file("man1")
+ etag = manifest.info()['etag']
+
+ self.assertRaises(ResponseError, manifest.read,
+ hdrs={'If-Match': 'not-%s' % etag})
+ self.assert_status(412)
+
+ manifest.read(hdrs={'If-Match': etag})
+ self.assert_status(200)
+
+ def test_dlo_if_none_match_get(self):
+ manifest = self.env.container.file("man1")
+ etag = manifest.info()['etag']
+
+ self.assertRaises(ResponseError, manifest.read,
+ hdrs={'If-None-Match': etag})
+ self.assert_status(304)
+
+ manifest.read(hdrs={'If-None-Match': "not-%s" % etag})
+ self.assert_status(200)
+
+ def test_dlo_if_match_head(self):
+ manifest = self.env.container.file("man1")
+ etag = manifest.info()['etag']
+
+ self.assertRaises(ResponseError, manifest.info,
+ hdrs={'If-Match': 'not-%s' % etag})
+ self.assert_status(412)
+
+ manifest.info(hdrs={'If-Match': etag})
+ self.assert_status(200)
+
+ def test_dlo_if_none_match_head(self):
+ manifest = self.env.container.file("man1")
+ etag = manifest.info()['etag']
+
+ self.assertRaises(ResponseError, manifest.info,
+ hdrs={'If-None-Match': etag})
+ self.assert_status(304)
+
+ manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
+ self.assert_status(200)
+
+
class TestDloUTF8(Base2, TestDlo):
set_up = False
@@ -1718,10 +1994,10 @@ class TestDloUTF8(Base2, TestDlo):
class TestFileComparisonEnv(object):
@classmethod
def setUp(cls):
- cls.conn = Connection(config)
+ cls.conn = Connection(tf.config)
cls.conn.authenticate()
- cls.account = Account(cls.conn, config.get('account',
- config['username']))
+ cls.account = Account(cls.conn, tf.config.get('account',
+ tf.config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
@@ -1773,19 +2049,25 @@ class TestFileComparison(Base):
for file_item in self.env.files:
hdrs = {'If-Modified-Since': self.env.time_old_f1}
self.assert_(file_item.read(hdrs=hdrs))
+ self.assert_(file_item.info(hdrs=hdrs))
hdrs = {'If-Modified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
+ self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
+ self.assert_status(304)
def testIfUnmodifiedSince(self):
for file_item in self.env.files:
hdrs = {'If-Unmodified-Since': self.env.time_new}
self.assert_(file_item.read(hdrs=hdrs))
+ self.assert_(file_item.info(hdrs=hdrs))
hdrs = {'If-Unmodified-Since': self.env.time_old_f2}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
+ self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
+ self.assert_status(412)
def testIfMatchAndUnmodified(self):
for file_item in self.env.files:
@@ -1835,17 +2117,24 @@ class TestSloEnv(object):
@classmethod
def setUp(cls):
- cls.conn = Connection(config)
+ cls.conn = Connection(tf.config)
cls.conn.authenticate()
+ config2 = deepcopy(tf.config)
+ config2['account'] = tf.config['account2']
+ config2['username'] = tf.config['username2']
+ config2['password'] = tf.config['password2']
+ cls.conn2 = Connection(config2)
+ cls.conn2.authenticate()
+ cls.account2 = cls.conn2.get_account()
+ cls.account2.delete_containers()
if cls.slo_enabled is None:
- cluster_info = cls.conn.cluster_info()
cls.slo_enabled = 'slo' in cluster_info
if not cls.slo_enabled:
return
- cls.account = Account(cls.conn, config.get('account',
- config['username']))
+ cls.account = Account(cls.conn, tf.config.get('account',
+ tf.config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
@@ -1911,7 +2200,7 @@ class TestSlo(Base):
set_up = False
def setUp(self):
- raise SkipTest("SLO not enabled yet in gluster-swift")
+ raise SkipTest("SLO not enabled yet in gluster-swift")
super(TestSlo, self).setUp()
if self.env.slo_enabled is False:
raise SkipTest("SLO not enabled")
@@ -2021,6 +2310,29 @@ class TestSlo(Base):
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents))
+ def test_slo_copy_account(self):
+ acct = self.env.conn.account_name
+ # same account copy
+ file_item = self.env.container.file("manifest-abcde")
+ file_item.copy_account(acct, self.env.container.name, "copied-abcde")
+
+ copied = self.env.container.file("copied-abcde")
+ copied_contents = copied.read(parms={'multipart-manifest': 'get'})
+ self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents))
+
+ # copy to different account
+ acct = self.env.conn2.account_name
+ dest_cont = self.env.account2.container(Utils.create_name())
+ self.assert_(dest_cont.create(hdrs={
+ 'X-Container-Write': self.env.conn.user_acl
+ }))
+ file_item = self.env.container.file("manifest-abcde")
+ file_item.copy_account(acct, dest_cont, "copied-abcde")
+
+ copied = dest_cont.file("copied-abcde")
+ copied_contents = copied.read(parms={'multipart-manifest': 'get'})
+ self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents))
+
def test_slo_copy_the_manifest(self):
file_item = self.env.container.file("manifest-abcde")
file_item.copy(self.env.container.name, "copied-abcde-manifest-only",
@@ -2033,6 +2345,40 @@ class TestSlo(Base):
except ValueError:
self.fail("COPY didn't copy the manifest (invalid json on GET)")
+ def test_slo_copy_the_manifest_account(self):
+ acct = self.env.conn.account_name
+ # same account
+ file_item = self.env.container.file("manifest-abcde")
+ file_item.copy_account(acct,
+ self.env.container.name,
+ "copied-abcde-manifest-only",
+ parms={'multipart-manifest': 'get'})
+
+ copied = self.env.container.file("copied-abcde-manifest-only")
+ copied_contents = copied.read(parms={'multipart-manifest': 'get'})
+ try:
+ json.loads(copied_contents)
+ except ValueError:
+ self.fail("COPY didn't copy the manifest (invalid json on GET)")
+
+ # different account
+ acct = self.env.conn2.account_name
+ dest_cont = self.env.account2.container(Utils.create_name())
+ self.assert_(dest_cont.create(hdrs={
+ 'X-Container-Write': self.env.conn.user_acl
+ }))
+ file_item.copy_account(acct,
+ dest_cont,
+ "copied-abcde-manifest-only",
+ parms={'multipart-manifest': 'get'})
+
+ copied = dest_cont.file("copied-abcde-manifest-only")
+ copied_contents = copied.read(parms={'multipart-manifest': 'get'})
+ try:
+ json.loads(copied_contents)
+ except ValueError:
+ self.fail("COPY didn't copy the manifest (invalid json on GET)")
+
def test_slo_get_the_manifest(self):
manifest = self.env.container.file("manifest-abcde")
got_body = manifest.read(parms={'multipart-manifest': 'get'})
@@ -2051,6 +2397,50 @@ class TestSlo(Base):
self.assertEqual('application/json; charset=utf-8',
got_info['content_type'])
+ def test_slo_if_match_get(self):
+ manifest = self.env.container.file("manifest-abcde")
+ etag = manifest.info()['etag']
+
+ self.assertRaises(ResponseError, manifest.read,
+ hdrs={'If-Match': 'not-%s' % etag})
+ self.assert_status(412)
+
+ manifest.read(hdrs={'If-Match': etag})
+ self.assert_status(200)
+
+ def test_slo_if_none_match_get(self):
+ manifest = self.env.container.file("manifest-abcde")
+ etag = manifest.info()['etag']
+
+ self.assertRaises(ResponseError, manifest.read,
+ hdrs={'If-None-Match': etag})
+ self.assert_status(304)
+
+ manifest.read(hdrs={'If-None-Match': "not-%s" % etag})
+ self.assert_status(200)
+
+ def test_slo_if_match_head(self):
+ manifest = self.env.container.file("manifest-abcde")
+ etag = manifest.info()['etag']
+
+ self.assertRaises(ResponseError, manifest.info,
+ hdrs={'If-Match': 'not-%s' % etag})
+ self.assert_status(412)
+
+ manifest.info(hdrs={'If-Match': etag})
+ self.assert_status(200)
+
+ def test_slo_if_none_match_head(self):
+ manifest = self.env.container.file("manifest-abcde")
+ etag = manifest.info()['etag']
+
+ self.assertRaises(ResponseError, manifest.info,
+ hdrs={'If-None-Match': etag})
+ self.assert_status(304)
+
+ manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
+ self.assert_status(200)
+
class TestSloUTF8(Base2, TestSlo):
set_up = False
@@ -2061,11 +2451,19 @@ class TestObjectVersioningEnv(object):
@classmethod
def setUp(cls):
- cls.conn = Connection(config)
+ cls.conn = Connection(tf.config)
cls.conn.authenticate()
- cls.account = Account(cls.conn, config.get('account',
- config['username']))
+ cls.account = Account(cls.conn, tf.config.get('account',
+ tf.config['username']))
+
+ # Second connection for ACL tests
+ config2 = deepcopy(tf.config)
+ config2['account'] = tf.config['account2']
+ config2['username'] = tf.config['username2']
+ config2['password'] = tf.config['password2']
+ cls.conn2 = Connection(config2)
+ cls.conn2.authenticate()
# avoid getting a prefix that stops halfway through an encoded
# character
@@ -2085,6 +2483,69 @@ class TestObjectVersioningEnv(object):
cls.versioning_enabled = 'versions' in container_info
+class TestCrossPolicyObjectVersioningEnv(object):
+ # tri-state: None initially, then True/False
+ versioning_enabled = None
+ multiple_policies_enabled = None
+ policies = None
+
+ @classmethod
+ def setUp(cls):
+ cls.conn = Connection(tf.config)
+ cls.conn.authenticate()
+
+ if cls.multiple_policies_enabled is None:
+ try:
+ cls.policies = tf.FunctionalStoragePolicyCollection.from_info()
+ except AssertionError:
+ pass
+
+ if cls.policies and len(cls.policies) > 1:
+ cls.multiple_policies_enabled = True
+ else:
+ cls.multiple_policies_enabled = False
+ # We have to lie here that versioning is enabled. We actually
+ # don't know, but it does not matter. We know these tests cannot
+ # run without multiple policies present. If multiple policies are
+ # present, we won't be setting this field to any value, so it
+ # should all still work.
+ cls.versioning_enabled = True
+ return
+
+ policy = cls.policies.select()
+ version_policy = cls.policies.exclude(name=policy['name']).select()
+
+ cls.account = Account(cls.conn, tf.config.get('account',
+ tf.config['username']))
+
+ # Second connection for ACL tests
+ config2 = deepcopy(tf.config)
+ config2['account'] = tf.config['account2']
+ config2['username'] = tf.config['username2']
+ config2['password'] = tf.config['password2']
+ cls.conn2 = Connection(config2)
+ cls.conn2.authenticate()
+
+ # avoid getting a prefix that stops halfway through an encoded
+ # character
+ prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
+
+ cls.versions_container = cls.account.container(prefix + "-versions")
+ if not cls.versions_container.create(
+ {'X-Storage-Policy': policy['name']}):
+ raise ResponseError(cls.conn.response)
+
+ cls.container = cls.account.container(prefix + "-objs")
+ if not cls.container.create(
+ hdrs={'X-Versions-Location': cls.versions_container.name,
+ 'X-Storage-Policy': version_policy['name']}):
+ raise ResponseError(cls.conn.response)
+
+ container_info = cls.container.info()
+ # if versioning is off, then X-Versions-Location won't persist
+ cls.versioning_enabled = 'versions' in container_info
+
+
class TestObjectVersioning(Base):
env = TestObjectVersioningEnv
set_up = False
@@ -2099,6 +2560,15 @@ class TestObjectVersioning(Base):
"Expected versioning_enabled to be True/False, got %r" %
(self.env.versioning_enabled,))
+ def tearDown(self):
+ super(TestObjectVersioning, self).tearDown()
+ try:
+ # delete versions first!
+ self.env.versions_container.delete_files()
+ self.env.container.delete_files()
+ except ResponseError:
+ pass
+
def test_overwriting(self):
container = self.env.container
versions_container = self.env.versions_container
@@ -2130,31 +2600,100 @@ class TestObjectVersioning(Base):
versioned_obj.delete()
self.assertRaises(ResponseError, versioned_obj.read)
+ def test_versioning_dlo(self):
+ raise SkipTest('SOF incompatible test')
+ container = self.env.container
+ versions_container = self.env.versions_container
+ obj_name = Utils.create_name()
+
+ for i in ('1', '2', '3'):
+ time.sleep(.01) # guarantee that the timestamp changes
+ obj_name_seg = obj_name + '/' + i
+ versioned_obj = container.file(obj_name_seg)
+ versioned_obj.write(i)
+ versioned_obj.write(i + i)
+
+ self.assertEqual(3, versions_container.info()['object_count'])
+
+ man_file = container.file(obj_name)
+ man_file.write('', hdrs={"X-Object-Manifest": "%s/%s/" %
+ (self.env.container.name, obj_name)})
+
+ # guarantee that the timestamp changes
+ time.sleep(.01)
+
+ # write manifest file again
+ man_file.write('', hdrs={"X-Object-Manifest": "%s/%s/" %
+ (self.env.container.name, obj_name)})
+
+ self.assertEqual(3, versions_container.info()['object_count'])
+ self.assertEqual("112233", man_file.read())
+
+ def test_versioning_check_acl(self):
+ container = self.env.container
+ versions_container = self.env.versions_container
+ versions_container.create(hdrs={'X-Container-Read': '.r:*,.rlistings'})
+
+ obj_name = Utils.create_name()
+ versioned_obj = container.file(obj_name)
+ versioned_obj.write("aaaaa")
+ self.assertEqual("aaaaa", versioned_obj.read())
+
+ versioned_obj.write("bbbbb")
+ self.assertEqual("bbbbb", versioned_obj.read())
+
+ # Use token from second account and try to delete the object
+ org_token = self.env.account.conn.storage_token
+ self.env.account.conn.storage_token = self.env.conn2.storage_token
+ try:
+ self.assertRaises(ResponseError, versioned_obj.delete)
+ finally:
+ self.env.account.conn.storage_token = org_token
+
+ # Verify with token from first account
+ self.assertEqual("bbbbb", versioned_obj.read())
+
+ versioned_obj.delete()
+ self.assertEqual("aaaaa", versioned_obj.read())
+
class TestObjectVersioningUTF8(Base2, TestObjectVersioning):
set_up = False
+class TestCrossPolicyObjectVersioning(TestObjectVersioning):
+ env = TestCrossPolicyObjectVersioningEnv
+ set_up = False
+
+ def setUp(self):
+ super(TestCrossPolicyObjectVersioning, self).setUp()
+ if self.env.multiple_policies_enabled is False:
+ raise SkipTest('Cross policy test requires multiple policies')
+ elif self.env.multiple_policies_enabled is not True:
+ # just some sanity checking
+ raise Exception("Expected multiple_policies_enabled "
+ "to be True/False, got %r" % (
+ self.env.versioning_enabled,))
+
+
class TestTempurlEnv(object):
tempurl_enabled = None # tri-state: None initially, then True/False
@classmethod
def setUp(cls):
- cls.conn = Connection(config)
+ cls.conn = Connection(tf.config)
cls.conn.authenticate()
if cls.tempurl_enabled is None:
- cluster_info = cls.conn.cluster_info()
cls.tempurl_enabled = 'tempurl' in cluster_info
if not cls.tempurl_enabled:
return
- cls.tempurl_methods = cluster_info['tempurl']['methods']
cls.tempurl_key = Utils.create_name()
cls.tempurl_key2 = Utils.create_name()
cls.account = Account(
- cls.conn, config.get('account', config['username']))
+ cls.conn, tf.config.get('account', tf.config['username']))
cls.account.delete_containers()
cls.account.update_metadata({
'temp-url-key': cls.tempurl_key,
@@ -2219,6 +2758,59 @@ class TestTempurl(Base):
contents = self.env.obj.read(parms=parms, cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents")
+ def test_GET_DLO_inside_container(self):
+ seg1 = self.env.container.file(
+ "get-dlo-inside-seg1" + Utils.create_name())
+ seg2 = self.env.container.file(
+ "get-dlo-inside-seg2" + Utils.create_name())
+ seg1.write("one fish two fish ")
+ seg2.write("red fish blue fish")
+
+ manifest = self.env.container.file("manifest" + Utils.create_name())
+ manifest.write(
+ '',
+ hdrs={"X-Object-Manifest": "%s/get-dlo-inside-seg" %
+ (self.env.container.name,)})
+
+ expires = int(time.time()) + 86400
+ sig = self.tempurl_sig(
+ 'GET', expires, self.env.conn.make_path(manifest.path),
+ self.env.tempurl_key)
+ parms = {'temp_url_sig': sig,
+ 'temp_url_expires': str(expires)}
+
+ contents = manifest.read(parms=parms, cfg={'no_auth_token': True})
+ self.assertEqual(contents, "one fish two fish red fish blue fish")
+
+ def test_GET_DLO_outside_container(self):
+ seg1 = self.env.container.file(
+ "get-dlo-outside-seg1" + Utils.create_name())
+ seg2 = self.env.container.file(
+ "get-dlo-outside-seg2" + Utils.create_name())
+ seg1.write("one fish two fish ")
+ seg2.write("red fish blue fish")
+
+ container2 = self.env.account.container(Utils.create_name())
+ container2.create()
+
+ manifest = container2.file("manifest" + Utils.create_name())
+ manifest.write(
+ '',
+ hdrs={"X-Object-Manifest": "%s/get-dlo-outside-seg" %
+ (self.env.container.name,)})
+
+ expires = int(time.time()) + 86400
+ sig = self.tempurl_sig(
+ 'GET', expires, self.env.conn.make_path(manifest.path),
+ self.env.tempurl_key)
+ parms = {'temp_url_sig': sig,
+ 'temp_url_expires': str(expires)}
+
+ # cross container tempurl works fine for account tempurl key
+ contents = manifest.read(parms=parms, cfg={'no_auth_token': True})
+ self.assertEqual(contents, "one fish two fish red fish blue fish")
+ self.assert_status([200])
+
def test_PUT(self):
new_obj = self.env.container.file(Utils.create_name())
@@ -2237,6 +2829,42 @@ class TestTempurl(Base):
self.assert_(new_obj.info(parms=put_parms,
cfg={'no_auth_token': True}))
+ def test_PUT_manifest_access(self):
+ new_obj = self.env.container.file(Utils.create_name())
+
+ # give out a signature which allows a PUT to new_obj
+ expires = int(time.time()) + 86400
+ sig = self.tempurl_sig(
+ 'PUT', expires, self.env.conn.make_path(new_obj.path),
+ self.env.tempurl_key)
+ put_parms = {'temp_url_sig': sig,
+ 'temp_url_expires': str(expires)}
+
+ # try to create manifest pointing to some random container
+ try:
+ new_obj.write('', {
+ 'x-object-manifest': '%s/foo' % 'some_random_container'
+ }, parms=put_parms, cfg={'no_auth_token': True})
+ except ResponseError as e:
+ self.assertEqual(e.status, 400)
+ else:
+ self.fail('request did not error')
+
+ # create some other container
+ other_container = self.env.account.container(Utils.create_name())
+ if not other_container.create():
+ raise ResponseError(self.conn.response)
+
+ # try to create manifest pointing to new container
+ try:
+ new_obj.write('', {
+ 'x-object-manifest': '%s/foo' % other_container
+ }, parms=put_parms, cfg={'no_auth_token': True})
+ except ResponseError as e:
+ self.assertEqual(e.status, 400)
+ else:
+ self.fail('request did not error')
+
def test_HEAD(self):
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
@@ -2310,22 +2938,288 @@ class TestTempurlUTF8(Base2, TestTempurl):
set_up = False
+class TestContainerTempurlEnv(object):
+ tempurl_enabled = None # tri-state: None initially, then True/False
+
+ @classmethod
+ def setUp(cls):
+ cls.conn = Connection(tf.config)
+ cls.conn.authenticate()
+
+ if cls.tempurl_enabled is None:
+ cls.tempurl_enabled = 'tempurl' in cluster_info
+ if not cls.tempurl_enabled:
+ return
+
+ cls.tempurl_key = Utils.create_name()
+ cls.tempurl_key2 = Utils.create_name()
+
+ cls.account = Account(
+ cls.conn, tf.config.get('account', tf.config['username']))
+ cls.account.delete_containers()
+
+ # creating another account and connection
+ # for ACL tests
+ config2 = deepcopy(tf.config)
+ config2['account'] = tf.config['account2']
+ config2['username'] = tf.config['username2']
+ config2['password'] = tf.config['password2']
+ cls.conn2 = Connection(config2)
+ cls.conn2.authenticate()
+ cls.account2 = Account(
+ cls.conn2, config2.get('account', config2['username']))
+ cls.account2 = cls.conn2.get_account()
+
+ cls.container = cls.account.container(Utils.create_name())
+ if not cls.container.create({
+ 'x-container-meta-temp-url-key': cls.tempurl_key,
+ 'x-container-meta-temp-url-key-2': cls.tempurl_key2,
+ 'x-container-read': cls.account2.name}):
+ raise ResponseError(cls.conn.response)
+
+ cls.obj = cls.container.file(Utils.create_name())
+ cls.obj.write("obj contents")
+ cls.other_obj = cls.container.file(Utils.create_name())
+ cls.other_obj.write("other obj contents")
+
+
+class TestContainerTempurl(Base):
+ env = TestContainerTempurlEnv
+ set_up = False
+
+ def setUp(self):
+ super(TestContainerTempurl, self).setUp()
+ if self.env.tempurl_enabled is False:
+ raise SkipTest("TempURL not enabled")
+ elif self.env.tempurl_enabled is not True:
+ # just some sanity checking
+ raise Exception(
+ "Expected tempurl_enabled to be True/False, got %r" %
+ (self.env.tempurl_enabled,))
+
+ expires = int(time.time()) + 86400
+ sig = self.tempurl_sig(
+ 'GET', expires, self.env.conn.make_path(self.env.obj.path),
+ self.env.tempurl_key)
+ self.obj_tempurl_parms = {'temp_url_sig': sig,
+ 'temp_url_expires': str(expires)}
+
+ def tempurl_sig(self, method, expires, path, key):
+ return hmac.new(
+ key,
+ '%s\n%s\n%s' % (method, expires, urllib.unquote(path)),
+ hashlib.sha1).hexdigest()
+
+ def test_GET(self):
+ contents = self.env.obj.read(
+ parms=self.obj_tempurl_parms,
+ cfg={'no_auth_token': True})
+ self.assertEqual(contents, "obj contents")
+
+ # GET tempurls also allow HEAD requests
+ self.assert_(self.env.obj.info(parms=self.obj_tempurl_parms,
+ cfg={'no_auth_token': True}))
+
+ def test_GET_with_key_2(self):
+ expires = int(time.time()) + 86400
+ sig = self.tempurl_sig(
+ 'GET', expires, self.env.conn.make_path(self.env.obj.path),
+ self.env.tempurl_key2)
+ parms = {'temp_url_sig': sig,
+ 'temp_url_expires': str(expires)}
+
+ contents = self.env.obj.read(parms=parms, cfg={'no_auth_token': True})
+ self.assertEqual(contents, "obj contents")
+
+ def test_PUT(self):
+ new_obj = self.env.container.file(Utils.create_name())
+
+ expires = int(time.time()) + 86400
+ sig = self.tempurl_sig(
+ 'PUT', expires, self.env.conn.make_path(new_obj.path),
+ self.env.tempurl_key)
+ put_parms = {'temp_url_sig': sig,
+ 'temp_url_expires': str(expires)}
+
+ new_obj.write('new obj contents',
+ parms=put_parms, cfg={'no_auth_token': True})
+ self.assertEqual(new_obj.read(), "new obj contents")
+
+ # PUT tempurls also allow HEAD requests
+ self.assert_(new_obj.info(parms=put_parms,
+ cfg={'no_auth_token': True}))
+
+ def test_HEAD(self):
+ expires = int(time.time()) + 86400
+ sig = self.tempurl_sig(
+ 'HEAD', expires, self.env.conn.make_path(self.env.obj.path),
+ self.env.tempurl_key)
+ head_parms = {'temp_url_sig': sig,
+ 'temp_url_expires': str(expires)}
+
+ self.assert_(self.env.obj.info(parms=head_parms,
+ cfg={'no_auth_token': True}))
+ # HEAD tempurls don't allow PUT or GET requests, despite the fact that
+ # PUT and GET tempurls both allow HEAD requests
+ self.assertRaises(ResponseError, self.env.other_obj.read,
+ cfg={'no_auth_token': True},
+ parms=self.obj_tempurl_parms)
+ self.assert_status([401])
+
+ self.assertRaises(ResponseError, self.env.other_obj.write,
+ 'new contents',
+ cfg={'no_auth_token': True},
+ parms=self.obj_tempurl_parms)
+ self.assert_status([401])
+
+ def test_different_object(self):
+ contents = self.env.obj.read(
+ parms=self.obj_tempurl_parms,
+ cfg={'no_auth_token': True})
+ self.assertEqual(contents, "obj contents")
+
+ self.assertRaises(ResponseError, self.env.other_obj.read,
+ cfg={'no_auth_token': True},
+ parms=self.obj_tempurl_parms)
+ self.assert_status([401])
+
+ def test_changing_sig(self):
+ contents = self.env.obj.read(
+ parms=self.obj_tempurl_parms,
+ cfg={'no_auth_token': True})
+ self.assertEqual(contents, "obj contents")
+
+ parms = self.obj_tempurl_parms.copy()
+ if parms['temp_url_sig'][0] == 'a':
+ parms['temp_url_sig'] = 'b' + parms['temp_url_sig'][1:]
+ else:
+ parms['temp_url_sig'] = 'a' + parms['temp_url_sig'][1:]
+
+ self.assertRaises(ResponseError, self.env.obj.read,
+ cfg={'no_auth_token': True},
+ parms=parms)
+ self.assert_status([401])
+
+ def test_changing_expires(self):
+ contents = self.env.obj.read(
+ parms=self.obj_tempurl_parms,
+ cfg={'no_auth_token': True})
+ self.assertEqual(contents, "obj contents")
+
+ parms = self.obj_tempurl_parms.copy()
+ if parms['temp_url_expires'][-1] == '0':
+ parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '1'
+ else:
+ parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '0'
+
+ self.assertRaises(ResponseError, self.env.obj.read,
+ cfg={'no_auth_token': True},
+ parms=parms)
+ self.assert_status([401])
+
+ def test_tempurl_keys_visible_to_account_owner(self):
+ if not tf.cluster_info.get('tempauth'):
+ raise SkipTest('TEMP AUTH SPECIFIC TEST')
+ metadata = self.env.container.info()
+ self.assertEqual(metadata.get('tempurl_key'), self.env.tempurl_key)
+ self.assertEqual(metadata.get('tempurl_key2'), self.env.tempurl_key2)
+
+ def test_tempurl_keys_hidden_from_acl_readonly(self):
+ if not tf.cluster_info.get('tempauth'):
+ raise SkipTest('TEMP AUTH SPECIFIC TEST')
+ original_token = self.env.container.conn.storage_token
+ self.env.container.conn.storage_token = self.env.conn2.storage_token
+ metadata = self.env.container.info()
+ self.env.container.conn.storage_token = original_token
+
+ self.assertTrue('tempurl_key' not in metadata,
+ 'Container TempURL key found, should not be visible '
+ 'to readonly ACLs')
+ self.assertTrue('tempurl_key2' not in metadata,
+ 'Container TempURL key-2 found, should not be visible '
+ 'to readonly ACLs')
+
+ def test_GET_DLO_inside_container(self):
+ seg1 = self.env.container.file(
+ "get-dlo-inside-seg1" + Utils.create_name())
+ seg2 = self.env.container.file(
+ "get-dlo-inside-seg2" + Utils.create_name())
+ seg1.write("one fish two fish ")
+ seg2.write("red fish blue fish")
+
+ manifest = self.env.container.file("manifest" + Utils.create_name())
+ manifest.write(
+ '',
+ hdrs={"X-Object-Manifest": "%s/get-dlo-inside-seg" %
+ (self.env.container.name,)})
+
+ expires = int(time.time()) + 86400
+ sig = self.tempurl_sig(
+ 'GET', expires, self.env.conn.make_path(manifest.path),
+ self.env.tempurl_key)
+ parms = {'temp_url_sig': sig,
+ 'temp_url_expires': str(expires)}
+
+ contents = manifest.read(parms=parms, cfg={'no_auth_token': True})
+ self.assertEqual(contents, "one fish two fish red fish blue fish")
+
+ def test_GET_DLO_outside_container(self):
+ container2 = self.env.account.container(Utils.create_name())
+ container2.create()
+ seg1 = container2.file(
+ "get-dlo-outside-seg1" + Utils.create_name())
+ seg2 = container2.file(
+ "get-dlo-outside-seg2" + Utils.create_name())
+ seg1.write("one fish two fish ")
+ seg2.write("red fish blue fish")
+
+ manifest = self.env.container.file("manifest" + Utils.create_name())
+ manifest.write(
+ '',
+ hdrs={"X-Object-Manifest": "%s/get-dlo-outside-seg" %
+ (container2.name,)})
+
+ expires = int(time.time()) + 86400
+ sig = self.tempurl_sig(
+ 'GET', expires, self.env.conn.make_path(manifest.path),
+ self.env.tempurl_key)
+ parms = {'temp_url_sig': sig,
+ 'temp_url_expires': str(expires)}
+
+ # cross container tempurl does not work for container tempurl key
+ try:
+ manifest.read(parms=parms, cfg={'no_auth_token': True})
+ except ResponseError as e:
+ self.assertEqual(e.status, 401)
+ else:
+ self.fail('request did not error')
+ try:
+ manifest.info(parms=parms, cfg={'no_auth_token': True})
+ except ResponseError as e:
+ self.assertEqual(e.status, 401)
+ else:
+ self.fail('request did not error')
+
+
+class TestContainerTempurlUTF8(Base2, TestContainerTempurl):
+ set_up = False
+
+
class TestSloTempurlEnv(object):
enabled = None # tri-state: None initially, then True/False
@classmethod
def setUp(cls):
- cls.conn = Connection(config)
+ cls.conn = Connection(tf.config)
cls.conn.authenticate()
if cls.enabled is None:
- cluster_info = cls.conn.cluster_info()
cls.enabled = 'tempurl' in cluster_info and 'slo' in cluster_info
cls.tempurl_key = Utils.create_name()
cls.account = Account(
- cls.conn, config.get('account', config['username']))
+ cls.conn, tf.config.get('account', tf.config['username']))
cls.account.delete_containers()
cls.account.update_metadata({'temp-url-key': cls.tempurl_key})
@@ -2398,5 +3292,174 @@ class TestSloTempurlUTF8(Base2, TestSloTempurl):
set_up = False
+class TestServiceToken(unittest.TestCase):
+
+ def setUp(self):
+ if tf.skip_service_tokens:
+ raise SkipTest
+
+ self.SET_TO_USERS_TOKEN = 1
+ self.SET_TO_SERVICE_TOKEN = 2
+
+ # keystoneauth and tempauth differ in allowing PUT account
+ # Even if keystoneauth allows it, the proxy-server uses
+ # allow_account_management to decide if accounts can be created
+ self.put_account_expect = is_client_error
+ if tf.swift_test_auth_version != '1':
+ if cluster_info.get('swift').get('allow_account_management'):
+ self.put_account_expect = is_success
+
+ def _scenario_generator(self):
+ paths = ((None, None), ('c', None), ('c', 'o'))
+ for path in paths:
+ for method in ('PUT', 'POST', 'HEAD', 'GET', 'OPTIONS'):
+ yield method, path[0], path[1]
+ for path in reversed(paths):
+ yield 'DELETE', path[0], path[1]
+
+ def _assert_is_authed_response(self, method, container, object, resp):
+ resp.read()
+ expect = is_success
+ if method == 'DELETE' and not container:
+ expect = is_client_error
+ if method == 'PUT' and not container:
+ expect = self.put_account_expect
+ self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
+ % (resp.status, method, container, object))
+
+ def _assert_not_authed_response(self, method, container, object, resp):
+ resp.read()
+ expect = is_client_error
+ if method == 'OPTIONS':
+ expect = is_success
+ self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
+ % (resp.status, method, container, object))
+
+ def prepare_request(self, method, use_service_account=False,
+ container=None, obj=None, body=None, headers=None,
+ x_auth_token=None,
+ x_service_token=None, dbg=False):
+ """
+ Setup for making the request
+
+        When retry() calls the do_request() function, it calls it with the
+ test user's token, the parsed path, a connection and (optionally)
+ a token from the test service user. We save options here so that
+ do_request() can make the appropriate request.
+
+        :param method: The operation (e.g. 'HEAD')
+ :param use_service_account: Optional. Set True to change the path to
+ be the service account
+ :param container: Optional. Adds a container name to the path
+ :param obj: Optional. Adds an object name to the path
+ :param body: Optional. Adds a body (string) in the request
+ :param headers: Optional. Adds additional headers.
+ :param x_auth_token: Optional. Default is SET_TO_USERS_TOKEN. One of:
+ SET_TO_USERS_TOKEN Put the test user's token in
+ X-Auth-Token
+ SET_TO_SERVICE_TOKEN Put the service token in X-Auth-Token
+ :param x_service_token: Optional. Default is to not set X-Service-Token
+ to any value. If specified, is one of following:
+ SET_TO_USERS_TOKEN Put the test user's token in
+ X-Service-Token
+ SET_TO_SERVICE_TOKEN Put the service token in
+ X-Service-Token
+ :param dbg: Optional. Set true to check request arguments
+ """
+ self.method = method
+ self.use_service_account = use_service_account
+ self.container = container
+ self.obj = obj
+ self.body = body
+ self.headers = headers
+ if x_auth_token:
+ self.x_auth_token = x_auth_token
+ else:
+ self.x_auth_token = self.SET_TO_USERS_TOKEN
+ self.x_service_token = x_service_token
+ self.dbg = dbg
+
+ def do_request(self, url, token, parsed, conn, service_token=''):
+ if self.use_service_account:
+ path = self._service_account(parsed.path)
+ else:
+ path = parsed.path
+ if self.container:
+ path += '/%s' % self.container
+ if self.obj:
+ path += '/%s' % self.obj
+ headers = {}
+ if self.body:
+ headers.update({'Content-Length': len(self.body)})
+ if self.headers:
+ headers.update(self.headers)
+ if self.x_auth_token == self.SET_TO_USERS_TOKEN:
+ headers.update({'X-Auth-Token': token})
+ elif self.x_auth_token == self.SET_TO_SERVICE_TOKEN:
+ headers.update({'X-Auth-Token': service_token})
+ if self.x_service_token == self.SET_TO_USERS_TOKEN:
+ headers.update({'X-Service-Token': token})
+ elif self.x_service_token == self.SET_TO_SERVICE_TOKEN:
+ headers.update({'X-Service-Token': service_token})
+ if self.dbg:
+ print('DEBUG: conn.request: method:%s path:%s'
+ ' body:%s headers:%s' % (self.method, path, self.body,
+ headers))
+ conn.request(self.method, path, self.body, headers=headers)
+ return check_response(conn)
+
+ def _service_account(self, path):
+ parts = path.split('/', 3)
+ account = parts[2]
+ try:
+ project_id = account[account.index('_') + 1:]
+ except ValueError:
+ project_id = account
+ parts[2] = '%s%s' % (tf.swift_test_service_prefix, project_id)
+ return '/'.join(parts)
+
+ def test_user_access_own_auth_account(self):
+ # This covers ground tested elsewhere (tests a user doing HEAD
+ # on own account). However, if this fails, none of the remaining
+ # tests will work
+ self.prepare_request('HEAD')
+ resp = retry(self.do_request)
+ resp.read()
+ self.assert_(resp.status in (200, 204), resp.status)
+
+ def test_user_cannot_access_service_account(self):
+ for method, container, obj in self._scenario_generator():
+ self.prepare_request(method, use_service_account=True,
+ container=container, obj=obj)
+ resp = retry(self.do_request)
+ self._assert_not_authed_response(method, container, obj, resp)
+
+ def test_service_user_denied_with_x_auth_token(self):
+ for method, container, obj in self._scenario_generator():
+ self.prepare_request(method, use_service_account=True,
+ container=container, obj=obj,
+ x_auth_token=self.SET_TO_SERVICE_TOKEN)
+ resp = retry(self.do_request, service_user=5)
+ self._assert_not_authed_response(method, container, obj, resp)
+
+ def test_service_user_denied_with_x_service_token(self):
+ for method, container, obj in self._scenario_generator():
+ self.prepare_request(method, use_service_account=True,
+ container=container, obj=obj,
+ x_auth_token=self.SET_TO_SERVICE_TOKEN,
+ x_service_token=self.SET_TO_SERVICE_TOKEN)
+ resp = retry(self.do_request, service_user=5)
+ self._assert_not_authed_response(method, container, obj, resp)
+
+ def test_user_plus_service_can_access_service_account(self):
+ for method, container, obj in self._scenario_generator():
+ self.prepare_request(method, use_service_account=True,
+ container=container, obj=obj,
+ x_auth_token=self.SET_TO_USERS_TOKEN,
+ x_service_token=self.SET_TO_SERVICE_TOKEN)
+ resp = retry(self.do_request, service_user=5)
+ self._assert_is_authed_response(method, container, obj, resp)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/functional_auth/common_conf/object-server.conf b/test/functional_auth/common_conf/object-server.conf
index 2599c87..28076cd 100644
--- a/test/functional_auth/common_conf/object-server.conf
+++ b/test/functional_auth/common_conf/object-server.conf
@@ -51,3 +51,5 @@ network_chunk_size = 65556
# across all conf files!
auto_create_account_prefix = gs
expiring_objects_account_name = expiring
+
+gluster_swift_mode = true
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index a1bfef8..372fb58 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -20,71 +20,288 @@ import copy
import logging
import errno
import sys
-from contextlib import contextmanager
-from collections import defaultdict
+from contextlib import contextmanager, closing
+from collections import defaultdict, Iterable
+import itertools
+from numbers import Number
from tempfile import NamedTemporaryFile
import time
+import eventlet
from eventlet.green import socket
from tempfile import mkdtemp
from shutil import rmtree
+from swift.common.utils import Timestamp
from test import get_config
-from swift.common.utils import config_true_value, LogAdapter
+from swift.common import swob, utils
+from swift.common.ring import Ring, RingData
from hashlib import md5
-from eventlet import sleep, Timeout
import logging.handlers
from httplib import HTTPException
-from numbers import Number
+from swift.common import storage_policy
+from swift.common.storage_policy import StoragePolicy, ECStoragePolicy
+import functools
+import cPickle as pickle
+from gzip import GzipFile
+import mock as mocklib
+import inspect
+
+EMPTY_ETAG = md5().hexdigest()
+
+# try not to import this module from swift
+if not os.path.basename(sys.argv[0]).startswith('swift'):
+ # never patch HASH_PATH_SUFFIX AGAIN!
+ utils.HASH_PATH_SUFFIX = 'endcap'
+
+
+def patch_policies(thing_or_policies=None, legacy_only=False,
+ with_ec_default=False, fake_ring_args=None):
+ if isinstance(thing_or_policies, (
+ Iterable, storage_policy.StoragePolicyCollection)):
+ return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args)
+
+ if legacy_only:
+ default_policies = [
+ StoragePolicy(0, name='legacy', is_default=True),
+ ]
+ default_ring_args = [{}]
+ elif with_ec_default:
+ default_policies = [
+ ECStoragePolicy(0, name='ec', is_default=True,
+ ec_type='jerasure_rs_vand', ec_ndata=10,
+ ec_nparity=4, ec_segment_size=4096),
+ StoragePolicy(1, name='unu'),
+ ]
+ default_ring_args = [{'replicas': 14}, {}]
+ else:
+ default_policies = [
+ StoragePolicy(0, name='nulo', is_default=True),
+ StoragePolicy(1, name='unu'),
+ ]
+ default_ring_args = [{}, {}]
+
+ fake_ring_args = fake_ring_args or default_ring_args
+ decorator = PatchPolicies(default_policies, fake_ring_args=fake_ring_args)
+
+ if not thing_or_policies:
+ return decorator
+ else:
+ # it's a thing, we return the wrapped thing instead of the decorator
+ return decorator(thing_or_policies)
+
+
+class PatchPolicies(object):
+ """
+ Why not mock.patch? In my case, when used as a decorator on the class it
+    seemed to patch setUp at the wrong time (i.e. in setUp the global wasn't
+ patched yet)
+ """
+
+ def __init__(self, policies, fake_ring_args=None):
+ if isinstance(policies, storage_policy.StoragePolicyCollection):
+ self.policies = policies
+ else:
+ self.policies = storage_policy.StoragePolicyCollection(policies)
+ self.fake_ring_args = fake_ring_args or [None] * len(self.policies)
+
+ def _setup_rings(self):
+ """
+ Our tests tend to use the policies rings like their own personal
+ playground - which can be a problem in the particular case of a
+ patched TestCase class where the FakeRing objects are scoped in the
+ call to the patch_policies wrapper outside of the TestCase instance
+ which can lead to some bled state.
+
+ To help tests get better isolation without having to think about it,
+        here we're capturing the args required to *build* new FakeRing
+ instances so we can ensure each test method gets a clean ring setup.
+
+ The TestCase can always "tweak" these fresh rings in setUp - or if
+ they'd prefer to get the same "reset" behavior with custom FakeRing's
+ they can pass in their own fake_ring_args to patch_policies instead of
+ setting the object_ring on the policy definitions.
+ """
+ for policy, fake_ring_arg in zip(self.policies, self.fake_ring_args):
+ if fake_ring_arg is not None:
+ policy.object_ring = FakeRing(**fake_ring_arg)
+
+ def __call__(self, thing):
+ if isinstance(thing, type):
+ return self._patch_class(thing)
+ else:
+ return self._patch_method(thing)
+
+ def _patch_class(self, cls):
+ """
+ Creating a new class that inherits from decorated class is the more
+ common way I've seen class decorators done - but it seems to cause
+ infinite recursion when super is called from inside methods in the
+ decorated class.
+ """
+
+ orig_setUp = cls.setUp
+ orig_tearDown = cls.tearDown
+
+ def setUp(cls_self):
+ self._orig_POLICIES = storage_policy._POLICIES
+ if not getattr(cls_self, '_policies_patched', False):
+ storage_policy._POLICIES = self.policies
+ self._setup_rings()
+ cls_self._policies_patched = True
+
+ orig_setUp(cls_self)
+
+ def tearDown(cls_self):
+ orig_tearDown(cls_self)
+ storage_policy._POLICIES = self._orig_POLICIES
+
+ cls.setUp = setUp
+ cls.tearDown = tearDown
+
+ return cls
+
+ def _patch_method(self, f):
+ @functools.wraps(f)
+ def mywrapper(*args, **kwargs):
+ self._orig_POLICIES = storage_policy._POLICIES
+ try:
+ storage_policy._POLICIES = self.policies
+ self._setup_rings()
+ return f(*args, **kwargs)
+ finally:
+ storage_policy._POLICIES = self._orig_POLICIES
+ return mywrapper
+
+ def __enter__(self):
+ self._orig_POLICIES = storage_policy._POLICIES
+ storage_policy._POLICIES = self.policies
+ def __exit__(self, *args):
+ storage_policy._POLICIES = self._orig_POLICIES
-class FakeRing(object):
- def __init__(self, replicas=3, max_more_nodes=0):
+class FakeRing(Ring):
+
+ def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
+ base_port=1000):
+ """
+ :param part_power: make part calculation based on the path
+
+ If you set a part_power when you setup your FakeRing the parts you get
+ out of ring methods will actually be based on the path - otherwise we
+ exercise the real ring code, but ignore the result and return 1.
+ """
+ self._base_port = base_port
+ self.max_more_nodes = max_more_nodes
+ self._part_shift = 32 - part_power
# 9 total nodes (6 more past the initial 3) is the cap, no matter if
# this is set higher, or R^2 for R replicas
- self.replicas = replicas
- self.max_more_nodes = max_more_nodes
- self.devs = {}
+ self.set_replicas(replicas)
+ self._reload()
+
+ def _reload(self):
+ self._rtime = time.time()
def set_replicas(self, replicas):
self.replicas = replicas
- self.devs = {}
+ self._devs = []
+ for x in range(self.replicas):
+ ip = '10.0.0.%s' % x
+ port = self._base_port + x
+ self._devs.append({
+ 'ip': ip,
+ 'replication_ip': ip,
+ 'port': port,
+ 'replication_port': port,
+ 'device': 'sd' + (chr(ord('a') + x)),
+ 'zone': x % 3,
+ 'region': x % 2,
+ 'id': x,
+ })
@property
def replica_count(self):
return self.replicas
- def get_part(self, account, container=None, obj=None):
- return 1
-
- def get_nodes(self, account, container=None, obj=None):
- devs = []
- for x in xrange(self.replicas):
- devs.append(self.devs.get(x))
- if devs[x] is None:
- self.devs[x] = devs[x] = \
- {'ip': '10.0.0.%s' % x,
- 'port': 1000 + x,
- 'device': 'sd' + (chr(ord('a') + x)),
- 'zone': x % 3,
- 'region': x % 2,
- 'id': x}
- return 1, devs
-
- def get_part_nodes(self, part):
- return self.get_nodes('blah')[1]
+ def _get_part_nodes(self, part):
+ return [dict(node, index=i) for i, node in enumerate(list(self._devs))]
def get_more_nodes(self, part):
# replicas^2 is the true cap
for x in xrange(self.replicas, min(self.replicas + self.max_more_nodes,
self.replicas * self.replicas)):
yield {'ip': '10.0.0.%s' % x,
- 'port': 1000 + x,
+ 'replication_ip': '10.0.0.%s' % x,
+ 'port': self._base_port + x,
+ 'replication_port': self._base_port + x,
'device': 'sda',
'zone': x % 3,
'region': x % 2,
'id': x}
+def write_fake_ring(path, *devs):
+ """
+ Pretty much just a two node, two replica, 2 part power ring...
+ """
+ dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
+ 'port': 6000}
+ dev2 = {'id': 0, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1',
+ 'port': 6000}
+
+ dev1_updates, dev2_updates = devs or ({}, {})
+
+ dev1.update(dev1_updates)
+ dev2.update(dev2_updates)
+
+ replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
+ devs = [dev1, dev2]
+ part_shift = 30
+ with closing(GzipFile(path, 'wb')) as f:
+ pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f)
+
+
+class FabricatedRing(Ring):
+ """
+ When a FakeRing just won't do - you can fabricate one to meet
+    your test's needs.
+ """
+
+    def __init__(self, replicas=6, devices=8, nodes=4, port=6000,
+                 part_power=4):
+        self.devices = devices
+        self.nodes = nodes
+        self.port = port
+        self.replicas = replicas  # was hard-coded to 6, ignoring the argument
+        self.part_power = part_power
+        self._part_shift = 32 - self.part_power
+        self._reload()
+
+ def _reload(self, *args, **kwargs):
+ self._rtime = time.time() * 2
+ if hasattr(self, '_replica2part2dev_id'):
+ return
+ self._devs = [{
+ 'region': 1,
+ 'zone': 1,
+ 'weight': 1.0,
+ 'id': i,
+ 'device': 'sda%d' % i,
+ 'ip': '10.0.0.%d' % (i % self.nodes),
+ 'replication_ip': '10.0.0.%d' % (i % self.nodes),
+ 'port': self.port,
+ 'replication_port': self.port,
+ } for i in range(self.devices)]
+
+ self._replica2part2dev_id = [
+ [None] * 2 ** self.part_power
+ for i in range(self.replicas)
+ ]
+ dev_ids = itertools.cycle(range(self.devices))
+ for p in range(2 ** self.part_power):
+ for r in range(self.replicas):
+ self._replica2part2dev_id[r][p] = next(dev_ids)
+
+
class FakeMemcache(object):
def __init__(self):
@@ -152,24 +369,13 @@ def tmpfile(content):
xattr_data = {}
-def _get_inode(fd_or_name):
- try:
- if isinstance(fd_or_name, int):
- fd = fd_or_name
- else:
- try:
- fd = fd_or_name.fileno()
- except AttributeError:
- fd = None
- if fd is None:
- ino = os.stat(fd_or_name).st_ino
- else:
- ino = os.fstat(fd).st_ino
- except OSError as err:
- ioerr = IOError()
- ioerr.errno = err.errno
- raise ioerr
- return ino
+def _get_inode(fd):
+ if not isinstance(fd, int):
+ try:
+ fd = fd.fileno()
+ except AttributeError:
+ return os.stat(fd).st_ino
+ return os.fstat(fd).st_ino
def _setxattr(fd, k, v):
@@ -183,9 +389,7 @@ def _getxattr(fd, k):
inode = _get_inode(fd)
data = xattr_data.get(inode, {}).get(k)
if not data:
- e = IOError("Fake IOError")
- e.errno = errno.ENODATA
- raise e
+ raise IOError(errno.ENODATA, "Fake IOError")
return data
import xattr
@@ -214,6 +418,22 @@ def temptree(files, contents=''):
rmtree(tempdir)
+def with_tempdir(f):
+ """
+ Decorator to give a single test a tempdir as argument to test method.
+ """
+ @functools.wraps(f)
+ def wrapped(*args, **kwargs):
+ tempdir = mkdtemp()
+ args = list(args)
+ args.append(tempdir)
+ try:
+ return f(*args, **kwargs)
+ finally:
+ rmtree(tempdir)
+ return wrapped
+
+
class NullLoggingHandler(logging.Handler):
def emit(self, record):
@@ -239,8 +459,8 @@ class UnmockTimeModule(object):
logging.time = UnmockTimeModule()
-class FakeLogger(logging.Logger):
- # a thread safe logger
+class FakeLogger(logging.Logger, object):
+ # a thread safe fake logger
def __init__(self, *args, **kwargs):
self._clear()
@@ -250,42 +470,57 @@ class FakeLogger(logging.Logger):
self.facility = kwargs['facility']
self.statsd_client = None
self.thread_locals = None
+ self.parent = None
+
+ store_in = {
+ logging.ERROR: 'error',
+ logging.WARNING: 'warning',
+ logging.INFO: 'info',
+ logging.DEBUG: 'debug',
+ logging.CRITICAL: 'critical',
+ }
+
+ def _log(self, level, msg, *args, **kwargs):
+ store_name = self.store_in[level]
+ cargs = [msg]
+ if any(args):
+ cargs.extend(args)
+ captured = dict(kwargs)
+ if 'exc_info' in kwargs and \
+ not isinstance(kwargs['exc_info'], tuple):
+ captured['exc_info'] = sys.exc_info()
+ self.log_dict[store_name].append((tuple(cargs), captured))
+ super(FakeLogger, self)._log(level, msg, *args, **kwargs)
def _clear(self):
self.log_dict = defaultdict(list)
- self.lines_dict = defaultdict(list)
-
- def _store_in(store_name):
- def stub_fn(self, *args, **kwargs):
- self.log_dict[store_name].append((args, kwargs))
- return stub_fn
-
- def _store_and_log_in(store_name):
- def stub_fn(self, *args, **kwargs):
- self.log_dict[store_name].append((args, kwargs))
- self._log(store_name, args[0], args[1:], **kwargs)
- return stub_fn
+ self.lines_dict = {'critical': [], 'error': [], 'info': [],
+ 'warning': [], 'debug': []}
def get_lines_for_level(self, level):
+ if level not in self.lines_dict:
+ raise KeyError(
+ "Invalid log level '%s'; valid levels are %s" %
+ (level,
+ ', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict))))
return self.lines_dict[level]
- error = _store_and_log_in('error')
- info = _store_and_log_in('info')
- warning = _store_and_log_in('warning')
- warn = _store_and_log_in('warning')
- debug = _store_and_log_in('debug')
+ def all_log_lines(self):
+ return dict((level, msgs) for level, msgs in self.lines_dict.items()
+ if len(msgs) > 0)
- def exception(self, *args, **kwargs):
- self.log_dict['exception'].append((args, kwargs,
- str(sys.exc_info()[1])))
- print 'FakeLogger Exception: %s' % self.log_dict
+ def _store_in(store_name):
+ def stub_fn(self, *args, **kwargs):
+ self.log_dict[store_name].append((args, kwargs))
+ return stub_fn
# mock out the StatsD logging methods:
+ update_stats = _store_in('update_stats')
increment = _store_in('increment')
decrement = _store_in('decrement')
timing = _store_in('timing')
timing_since = _store_in('timing_since')
- update_stats = _store_in('update_stats')
+ transfer_rate = _store_in('transfer_rate')
set_statsd_prefix = _store_in('set_statsd_prefix')
def get_increments(self):
@@ -328,7 +563,7 @@ class FakeLogger(logging.Logger):
print 'WARNING: unable to format log message %r %% %r' % (
record.msg, record.args)
raise
- self.lines_dict[record.levelno].append(line)
+ self.lines_dict[record.levelname.lower()].append(line)
def handle(self, record):
self._handle(record)
@@ -345,19 +580,40 @@ class DebugLogger(FakeLogger):
def __init__(self, *args, **kwargs):
FakeLogger.__init__(self, *args, **kwargs)
- self.formatter = logging.Formatter("%(server)s: %(message)s")
+ self.formatter = logging.Formatter(
+ "%(server)s %(levelname)s: %(message)s")
def handle(self, record):
self._handle(record)
print self.formatter.format(record)
- def write(self, *args):
- print args
+
+class DebugLogAdapter(utils.LogAdapter):
+
+ def _send_to_logger(name):
+ def stub_fn(self, *args, **kwargs):
+ return getattr(self.logger, name)(*args, **kwargs)
+ return stub_fn
+
+ # delegate to FakeLogger's mocks
+ update_stats = _send_to_logger('update_stats')
+ increment = _send_to_logger('increment')
+ decrement = _send_to_logger('decrement')
+ timing = _send_to_logger('timing')
+ timing_since = _send_to_logger('timing_since')
+ transfer_rate = _send_to_logger('transfer_rate')
+ set_statsd_prefix = _send_to_logger('set_statsd_prefix')
+
+ def __getattribute__(self, name):
+ try:
+ return object.__getattribute__(self, name)
+ except AttributeError:
+ return getattr(self.__dict__['logger'], name)
def debug_logger(name='test'):
"""get a named adapted debug logger"""
- return LogAdapter(DebugLogger(), name)
+ return DebugLogAdapter(DebugLogger(), name)
original_syslog_handler = logging.handlers.SysLogHandler
@@ -374,7 +630,8 @@ def fake_syslog_handler():
logging.handlers.SysLogHandler = FakeLogger
-if config_true_value(get_config('unit_test').get('fake_syslog', 'False')):
+if utils.config_true_value(
+ get_config('unit_test').get('fake_syslog', 'False')):
fake_syslog_handler()
@@ -447,17 +704,66 @@ def mock(update):
delattr(module, attr)
+class SlowBody(object):
+ """
+ This will work with our fake_http_connect, if you hand in these
+ instead of strings it will make reads take longer by the given
+ amount. It should be a little bit easier to extend than the
+ current slow kwarg - which inserts whitespace in the response.
+ Also it should be easy to detect if you have one of these (or a
+ subclass) for the body inside of FakeConn if we wanted to do
+ something smarter than just duck-type the str/buffer api
+ enough to get by.
+ """
+
+ def __init__(self, body, slowness):
+ self.body = body
+ self.slowness = slowness
+
+ def slowdown(self):
+ eventlet.sleep(self.slowness)
+
+ def __getitem__(self, s):
+ return SlowBody(self.body[s], self.slowness)
+
+ def __len__(self):
+ return len(self.body)
+
+ def __radd__(self, other):
+ self.slowdown()
+ return other + self.body
+
+
def fake_http_connect(*code_iter, **kwargs):
class FakeConn(object):
def __init__(self, status, etag=None, body='', timestamp='1',
- expect_status=None, headers=None):
- self.status = status
- if expect_status is None:
- self.expect_status = self.status
+ headers=None, expect_headers=None, connection_id=None,
+ give_send=None):
+ # connect exception
+ if isinstance(status, (Exception, eventlet.Timeout)):
+ raise status
+ if isinstance(status, tuple):
+ self.expect_status = list(status[:-1])
+ self.status = status[-1]
+ self.explicit_expect_list = True
else:
- self.expect_status = expect_status
+ self.expect_status, self.status = ([], status)
+ self.explicit_expect_list = False
+ if not self.expect_status:
+ # when a swift backend service returns a status before reading
+ # from the body (mostly an error response) eventlet.wsgi will
+ # respond with that status line immediately instead of 100
+ # Continue, even if the client sent the Expect 100 header.
+ # BufferedHttp and the proxy both see these error statuses
+ # when they call getexpect, so our FakeConn tries to act like
+ # our backend services and return certain types of responses
+ # as expect statuses just like a real backend server would do.
+ if self.status in (507, 412, 409):
+ self.expect_status = [status]
+ else:
+ self.expect_status = [100, 100]
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
@@ -466,30 +772,41 @@ def fake_http_connect(*code_iter, **kwargs):
self.etag = etag
self.body = body
self.headers = headers or {}
+ self.expect_headers = expect_headers or {}
self.timestamp = timestamp
+ self.connection_id = connection_id
+ self.give_send = give_send
if 'slow' in kwargs and isinstance(kwargs['slow'], list):
try:
self._next_sleep = kwargs['slow'].pop(0)
except IndexError:
self._next_sleep = None
+ # be nice to trixy bits with node_iter's
+ eventlet.sleep()
def getresponse(self):
- if kwargs.get('raise_exc'):
+ if self.expect_status and self.explicit_expect_list:
+ raise Exception('Test did not consume all fake '
+ 'expect status: %r' % (self.expect_status,))
+ if isinstance(self.status, (Exception, eventlet.Timeout)):
+ raise self.status
+ exc = kwargs.get('raise_exc')
+ if exc:
+ if isinstance(exc, (Exception, eventlet.Timeout)):
+ raise exc
raise Exception('test')
if kwargs.get('raise_timeout_exc'):
- raise Timeout()
+ raise eventlet.Timeout()
return self
def getexpect(self):
- if self.expect_status == -2:
- raise HTTPException()
- if self.expect_status == -3:
- return FakeConn(507)
- if self.expect_status == -4:
- return FakeConn(201)
- if self.expect_status == 412:
- return FakeConn(412)
- return FakeConn(100)
+ expect_status = self.expect_status.pop(0)
+ if isinstance(self.expect_status, (Exception, eventlet.Timeout)):
+ raise self.expect_status
+ headers = dict(self.expect_headers)
+ if expect_status == 409:
+ headers['X-Backend-Timestamp'] = self.timestamp
+ return FakeConn(expect_status, headers=headers)
def getheaders(self):
etag = self.etag
@@ -499,19 +816,23 @@ def fake_http_connect(*code_iter, **kwargs):
else:
etag = '"68b329da9893e34099c7d8ad5cb9c940"'
- headers = {'content-length': len(self.body),
- 'content-type': 'x-application/test',
- 'x-timestamp': self.timestamp,
- 'last-modified': self.timestamp,
- 'x-object-meta-test': 'testing',
- 'x-delete-at': '9876543210',
- 'etag': etag,
- 'x-works': 'yes'}
+ headers = swob.HeaderKeyDict({
+ 'content-length': len(self.body),
+ 'content-type': 'x-application/test',
+ 'x-timestamp': self.timestamp,
+ 'x-backend-timestamp': self.timestamp,
+ 'last-modified': self.timestamp,
+ 'x-object-meta-test': 'testing',
+ 'x-delete-at': '9876543210',
+ 'etag': etag,
+ 'x-works': 'yes',
+ })
if self.status // 100 == 2:
headers['x-account-container-count'] = \
kwargs.get('count', 12345)
if not self.timestamp:
- del headers['x-timestamp']
+ # when timestamp is None, HeaderKeyDict raises KeyError
+ headers.pop('x-timestamp', None)
try:
if container_ts_iter.next() is False:
headers['x-container-timestamp'] = '1'
@@ -538,34 +859,45 @@ def fake_http_connect(*code_iter, **kwargs):
if am_slow:
if self.sent < 4:
self.sent += 1
- sleep(value)
+ eventlet.sleep(value)
return ' '
rv = self.body[:amt]
self.body = self.body[amt:]
return rv
def send(self, amt=None):
+ if self.give_send:
+ self.give_send(self.connection_id, amt)
am_slow, value = self.get_slow()
if am_slow:
if self.received < 4:
self.received += 1
- sleep(value)
+ eventlet.sleep(value)
def getheader(self, name, default=None):
- return dict(self.getheaders()).get(name.lower(), default)
+ return swob.HeaderKeyDict(self.getheaders()).get(name, default)
+
+ def close(self):
+ pass
timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
- if isinstance(kwargs.get('headers'), list):
+ if isinstance(kwargs.get('headers'), (list, tuple)):
headers_iter = iter(kwargs['headers'])
else:
headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))
+ if isinstance(kwargs.get('expect_headers'), (list, tuple)):
+ expect_headers_iter = iter(kwargs['expect_headers'])
+ else:
+ expect_headers_iter = iter([kwargs.get('expect_headers', {})] *
+ len(code_iter))
x = kwargs.get('missing_container', [False] * len(code_iter))
if not isinstance(x, (tuple, list)):
x = [x] * len(code_iter)
container_ts_iter = iter(x)
code_iter = iter(code_iter)
+ conn_id_and_code_iter = enumerate(code_iter)
static_body = kwargs.get('body', None)
body_iter = kwargs.get('body_iter', None)
if body_iter:
@@ -573,21 +905,22 @@ def fake_http_connect(*code_iter, **kwargs):
def connect(*args, **ckwargs):
if kwargs.get('slow_connect', False):
- sleep(0.1)
+ eventlet.sleep(0.1)
if 'give_content_type' in kwargs:
if len(args) >= 7 and 'Content-Type' in args[6]:
kwargs['give_content_type'](args[6]['Content-Type'])
else:
kwargs['give_content_type']('')
+ i, status = conn_id_and_code_iter.next()
if 'give_connect' in kwargs:
- kwargs['give_connect'](*args, **ckwargs)
- status = code_iter.next()
- if isinstance(status, tuple):
- status, expect_status = status
- else:
- expect_status = status
+ give_conn_fn = kwargs['give_connect']
+ argspec = inspect.getargspec(give_conn_fn)
+ if argspec.keywords or 'connection_id' in argspec.args:
+ ckwargs['connection_id'] = i
+ give_conn_fn(*args, **ckwargs)
etag = etag_iter.next()
headers = headers_iter.next()
+ expect_headers = expect_headers_iter.next()
timestamp = timestamps_iter.next()
if status <= 0:
@@ -597,8 +930,39 @@ def fake_http_connect(*code_iter, **kwargs):
else:
body = body_iter.next()
return FakeConn(status, etag, body=body, timestamp=timestamp,
- expect_status=expect_status, headers=headers)
+ headers=headers, expect_headers=expect_headers,
+ connection_id=i, give_send=kwargs.get('give_send'))
connect.code_iter = code_iter
return connect
+
+
+@contextmanager
+def mocked_http_conn(*args, **kwargs):
+ requests = []
+
+ def capture_requests(ip, port, method, path, headers, qs, ssl):
+ req = {
+ 'ip': ip,
+ 'port': port,
+ 'method': method,
+ 'path': path,
+ 'headers': headers,
+ 'qs': qs,
+ 'ssl': ssl,
+ }
+ requests.append(req)
+ kwargs.setdefault('give_connect', capture_requests)
+ fake_conn = fake_http_connect(*args, **kwargs)
+ fake_conn.requests = requests
+ with mocklib.patch('swift.common.bufferedhttp.http_connect_raw',
+ new=fake_conn):
+ yield fake_conn
+ left_over_status = list(fake_conn.code_iter)
+ if left_over_status:
+ raise AssertionError('left over status %r' % left_over_status)
+
+
+def make_timestamp_iter():
+ return iter(Timestamp(t) for t in itertools.count(int(time.time())))
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py
index 1fe0904..6e2d0b1 100644
--- a/test/unit/obj/test_diskfile.py
+++ b/test/unit/obj/test_diskfile.py
@@ -26,18 +26,18 @@ from eventlet import tpool
from mock import Mock, patch
from hashlib import md5
from copy import deepcopy
+from contextlib import nested
from gluster.swift.common.exceptions import AlreadyExistsAsDir, \
AlreadyExistsAsFile
-from swift.common.exceptions import DiskFileNotExist, DiskFileError, \
- DiskFileNoSpace, DiskFileNotOpen
+from swift.common.exceptions import DiskFileNoSpace, DiskFileNotOpen, \
+ DiskFileNotExist, DiskFileExpired
from swift.common.utils import ThreadPool
-from gluster.swift.common.exceptions import GlusterFileSystemOSError
import gluster.swift.common.utils
from gluster.swift.common.utils import normalize_timestamp
import gluster.swift.obj.diskfile
-from gluster.swift.obj.diskfile import DiskFileWriter, DiskFile, OnDiskManager
-from gluster.swift.common.utils import DEFAULT_UID, DEFAULT_GID, X_TYPE, \
+from gluster.swift.obj.diskfile import DiskFileWriter, DiskFileManager
+from gluster.swift.common.utils import DEFAULT_UID, DEFAULT_GID, \
X_OBJECT_TYPE, DIR_OBJECT
from test.unit.common.test_utils import _initxattr, _destroyxattr
@@ -136,7 +136,7 @@ class TestDiskFile(unittest.TestCase):
self.td = tempfile.mkdtemp()
self.conf = dict(devices=self.td, mb_per_sync=2,
keep_cache_size=(1024 * 1024), mount_check=False)
- self.mgr = OnDiskManager(self.conf, self.lg)
+ self.mgr = DiskFileManager(self.conf, self.lg)
def tearDown(self):
tpool.execute = self._orig_tpool_exc
@@ -150,7 +150,7 @@ class TestDiskFile(unittest.TestCase):
shutil.rmtree(self.td)
def _get_diskfile(self, d, p, a, c, o, **kwargs):
- return self.mgr.get_diskfile(d, a, c, o, **kwargs)
+ return self.mgr.get_diskfile(d, p, a, c, o, **kwargs)
def test_constructor_no_slash(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
@@ -161,18 +161,16 @@ class TestDiskFile(unittest.TestCase):
assert gdf._gid == DEFAULT_GID
assert gdf._obj == "z"
assert gdf._obj_path == ""
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar"), gdf._datadir
- assert gdf._datadir == gdf._put_datadir
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar"), gdf._put_datadir
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
assert gdf._is_dir is False
- assert gdf._logger == self.lg
assert gdf._fd is None
def test_constructor_leadtrail_slash(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "/b/a/z/")
assert gdf._obj == "z"
assert gdf._obj_path == "b/a"
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "b", "a"), gdf._datadir
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar", "b", "a"), gdf._put_datadir
def test_open_no_metadata(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -323,7 +321,7 @@ class TestDiskFile(unittest.TestCase):
def test_reader_disk_chunk_size(self):
conf = dict(disk_chunk_size=64)
conf.update(self.conf)
- self.mgr = OnDiskManager(conf, self.lg)
+ self.mgr = DiskFileManager(conf, self.lg)
gdf = self._create_and_get_diskfile("vol0", "p57", "ufo47", "bar", "z")
with gdf.open():
reader = gdf.reader()
@@ -669,7 +667,7 @@ class TestDiskFile(unittest.TestCase):
assert gdf._obj == "z"
assert gdf._obj_path == ""
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
- assert gdf._datadir == the_cont
+ assert gdf._put_datadir == the_cont
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
body = '1234\n'
@@ -699,7 +697,7 @@ class TestDiskFile(unittest.TestCase):
assert gdf._obj == "z"
assert gdf._obj_path == ""
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
- assert gdf._datadir == the_cont
+ assert gdf._put_datadir == the_cont
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
body = '1234\n'
@@ -734,7 +732,7 @@ class TestDiskFile(unittest.TestCase):
assert gdf._obj == "z"
assert gdf._obj_path == ""
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
- assert gdf._datadir == the_cont
+ assert gdf._put_datadir == the_cont
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
body = '1234\n'
@@ -760,10 +758,9 @@ class TestDiskFile(unittest.TestCase):
try:
with gdf.create() as dw:
assert dw._tmppath is not None
- tmppath = dw._tmppath
dw.write(body)
dw.put(metadata)
- except GlusterFileSystemOSError:
+ except OSError:
pass
else:
self.fail("Expected exception DiskFileError")
@@ -775,7 +772,7 @@ class TestDiskFile(unittest.TestCase):
assert gdf._obj == "z"
assert gdf._obj_path == the_obj_path
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "b", "a")
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar", "b", "a")
assert gdf._data_file == os.path.join(
self.td, "vol0", "bar", "b", "a", "z")
@@ -811,8 +808,8 @@ class TestDiskFile(unittest.TestCase):
assert not gdf._is_dir
later = float(gdf.read_metadata()['X-Timestamp']) + 1
gdf.delete(normalize_timestamp(later))
- assert os.path.isdir(gdf._datadir)
- assert not os.path.exists(os.path.join(gdf._datadir, gdf._obj))
+ assert os.path.isdir(gdf._put_datadir)
+ assert not os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_delete_same_timestamp(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -826,8 +823,8 @@ class TestDiskFile(unittest.TestCase):
assert not gdf._is_dir
now = float(gdf.read_metadata()['X-Timestamp'])
gdf.delete(normalize_timestamp(now))
- assert os.path.isdir(gdf._datadir)
- assert os.path.exists(os.path.join(gdf._datadir, gdf._obj))
+ assert os.path.isdir(gdf._put_datadir)
+ assert os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_delete_file_not_found(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -845,8 +842,8 @@ class TestDiskFile(unittest.TestCase):
os.unlink(the_file)
gdf.delete(normalize_timestamp(later))
- assert os.path.isdir(gdf._datadir)
- assert not os.path.exists(os.path.join(gdf._datadir, gdf._obj))
+ assert os.path.isdir(gdf._put_datadir)
+ assert not os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_delete_file_unlink_error(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -879,8 +876,8 @@ class TestDiskFile(unittest.TestCase):
finally:
os.chmod(the_path, stats.st_mode)
- assert os.path.isdir(gdf._datadir)
- assert os.path.exists(os.path.join(gdf._datadir, gdf._obj))
+ assert os.path.isdir(gdf._put_datadir)
+ assert os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_delete_is_dir(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -890,18 +887,18 @@ class TestDiskFile(unittest.TestCase):
assert gdf._data_file == the_dir
later = float(gdf.read_metadata()['X-Timestamp']) + 1
gdf.delete(normalize_timestamp(later))
- assert os.path.isdir(gdf._datadir)
- assert not os.path.exists(os.path.join(gdf._datadir, gdf._obj))
+ assert os.path.isdir(gdf._put_datadir)
+ assert not os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_create(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir/z")
saved_tmppath = ''
saved_fd = None
with gdf.create() as dw:
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "dir")
- assert os.path.isdir(gdf._datadir)
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar", "dir")
+ assert os.path.isdir(gdf._put_datadir)
saved_tmppath = dw._tmppath
- assert os.path.dirname(saved_tmppath) == gdf._datadir
+ assert os.path.dirname(saved_tmppath) == gdf._put_datadir
assert os.path.basename(saved_tmppath)[:3] == '.z.'
assert os.path.exists(saved_tmppath)
dw.write("123")
@@ -921,10 +918,10 @@ class TestDiskFile(unittest.TestCase):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir/z")
saved_tmppath = ''
with gdf.create() as dw:
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "dir")
- assert os.path.isdir(gdf._datadir)
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar", "dir")
+ assert os.path.isdir(gdf._put_datadir)
saved_tmppath = dw._tmppath
- assert os.path.dirname(saved_tmppath) == gdf._datadir
+ assert os.path.dirname(saved_tmppath) == gdf._put_datadir
assert os.path.basename(saved_tmppath)[:3] == '.z.'
assert os.path.exists(saved_tmppath)
dw.write("123")
@@ -936,10 +933,10 @@ class TestDiskFile(unittest.TestCase):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir/z")
saved_tmppath = ''
with gdf.create() as dw:
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "dir")
- assert os.path.isdir(gdf._datadir)
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar", "dir")
+ assert os.path.isdir(gdf._put_datadir)
saved_tmppath = dw._tmppath
- assert os.path.dirname(saved_tmppath) == gdf._datadir
+ assert os.path.dirname(saved_tmppath) == gdf._put_datadir
assert os.path.basename(saved_tmppath)[:3] == '.z.'
assert os.path.exists(saved_tmppath)
dw.write("123")
@@ -973,3 +970,70 @@ class TestDiskFile(unittest.TestCase):
assert os.path.exists(gdf._data_file) # Real file exists
assert not os.path.exists(tmppath) # Temp file does not exist
+
+ def test_fd_closed_when_diskfile_open_raises_exception_race(self):
+ # do_open() succeeds but read_metadata() fails(GlusterFS)
+ _m_do_open = Mock(return_value=999)
+ _m_do_fstat = Mock(return_value=
+ os.stat_result((33261, 2753735, 2053, 1, 1000,
+ 1000, 6873, 1431415969,
+ 1376895818, 1433139196)))
+ _m_rmd = Mock(side_effect=IOError(errno.ENOENT,
+ os.strerror(errno.ENOENT)))
+ _m_do_close = Mock()
+ _m_log = Mock()
+
+ with nested(
+ patch("gluster.swift.obj.diskfile.do_open", _m_do_open),
+ patch("gluster.swift.obj.diskfile.do_fstat", _m_do_fstat),
+ patch("gluster.swift.obj.diskfile.read_metadata", _m_rmd),
+ patch("gluster.swift.obj.diskfile.do_close", _m_do_close),
+ patch("gluster.swift.obj.diskfile.logging.warn", _m_log)):
+ gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
+ try:
+ with gdf.open():
+ pass
+ except DiskFileNotExist:
+ pass
+ else:
+ self.fail("Expecting DiskFileNotExist")
+ _m_do_fstat.assert_called_once_with(999)
+ _m_rmd.assert_called_once_with(999)
+ _m_do_close.assert_called_once_with(999)
+ self.assertFalse(gdf._fd)
+ # Make sure ENOENT failure is logged
+ self.assertTrue("failed with ENOENT" in _m_log.call_args[0][0])
+
+ def test_fd_closed_when_diskfile_open_raises_DiskFileExpired(self):
+ # A GET/DELETE on an expired object should close fd
+ the_path = os.path.join(self.td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ os.makedirs(the_path)
+ with open(the_file, "w") as fd:
+ fd.write("1234")
+ md = {
+ 'X-Type': 'Object',
+ 'X-Object-Type': 'file',
+ 'Content-Length': str(os.path.getsize(the_file)),
+ 'ETag': md5("1234").hexdigest(),
+ 'X-Timestamp': os.stat(the_file).st_mtime,
+ 'X-Delete-At': 0, # This is in the past
+ 'Content-Type': 'application/octet-stream'}
+ _metadata[_mapit(the_file)] = md
+
+ _m_do_close = Mock()
+
+ with patch("gluster.swift.obj.diskfile.do_close", _m_do_close):
+ gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
+ try:
+ with gdf.open():
+ pass
+ except DiskFileExpired:
+ # Confirm that original exception is re-raised
+ pass
+ else:
+ self.fail("Expecting DiskFileExpired")
+ self.assertEqual(_m_do_close.call_count, 1)
+ self.assertFalse(gdf._fd)
+ # Close the actual fd, as we had mocked do_close
+ os.close(_m_do_close.call_args[0][0])
diff --git a/test/unit/obj/test_expirer.py b/test/unit/obj/test_expirer.py
index 236775e..9701027 100644
--- a/test/unit/obj/test_expirer.py
+++ b/test/unit/obj/test_expirer.py
@@ -16,12 +16,14 @@
import urllib
from time import time
from unittest import main, TestCase
-from test.unit import FakeLogger
+from test.unit import FakeRing, mocked_http_conn, debug_logger
from copy import deepcopy
+from tempfile import mkdtemp
+from shutil import rmtree
import mock
-from swift.common import internal_client
+from swift.common import internal_client, utils
from swift.obj import expirer
@@ -39,6 +41,7 @@ def not_sleep(seconds):
class TestObjectExpirer(TestCase):
maxDiff = None
+ internal_client = None
def setUp(self):
global not_sleep
@@ -49,9 +52,14 @@ class TestObjectExpirer(TestCase):
internal_client.loadapp = lambda *a, **kw: None
internal_client.sleep = not_sleep
- def teardown(self):
+ self.rcache = mkdtemp()
+ self.conf = {'recon_cache_path': self.rcache}
+ self.logger = debug_logger('test-recon')
+
+ def tearDown(self):
+ rmtree(self.rcache)
internal_client.sleep = self.old_sleep
- internal_client.loadapp = self.loadapp
+ internal_client.loadapp = self.old_loadapp
def test_get_process_values_from_kwargs(self):
x = expirer.ObjectExpirer({})
@@ -59,7 +67,9 @@ class TestObjectExpirer(TestCase):
'processes': 5,
'process': 1,
}
- self.assertEqual((5, 1), x.get_process_values(vals))
+ x.get_process_values(vals)
+ self.assertEqual(x.processes, 5)
+ self.assertEqual(x.process, 1)
def test_get_process_values_from_config(self):
vals = {
@@ -67,7 +77,9 @@ class TestObjectExpirer(TestCase):
'process': 1,
}
x = expirer.ObjectExpirer(vals)
- self.assertEqual((5, 1), x.get_process_values({}))
+ x.get_process_values({})
+ self.assertEqual(x.processes, 5)
+ self.assertEqual(x.process, 1)
def test_get_process_values_negative_process(self):
vals = {
@@ -123,11 +135,13 @@ class TestObjectExpirer(TestCase):
super(ObjectExpirer, self).__init__(conf)
self.processes = 3
self.deleted_objects = {}
+ self.obj_containers_in_order = []
def delete_object(self, actual_obj, timestamp, container, obj):
if container not in self.deleted_objects:
self.deleted_objects[container] = set()
self.deleted_objects[container].add(obj)
+ self.obj_containers_in_order.append(container)
class InternalClient(object):
@@ -139,21 +153,22 @@ class TestObjectExpirer(TestCase):
sum([len(self.containers[x]) for x in self.containers])
def iter_containers(self, *a, **kw):
- return [{'name': x} for x in self.containers.keys()]
+ return [{'name': unicode(x)} for x in self.containers.keys()]
def iter_objects(self, account, container):
- return [{'name': x} for x in self.containers[container]]
+ return [{'name': unicode(x)}
+ for x in self.containers[container]]
def delete_container(*a, **kw):
pass
containers = {
- 0: set('1-one 2-two 3-three'.split()),
- 1: set('2-two 3-three 4-four'.split()),
- 2: set('5-five 6-six'.split()),
- 3: set('7-seven'.split()),
+ '0': set('1-one 2-two 3-three'.split()),
+ '1': set('2-two 3-three 4-four'.split()),
+ '2': set('5-five 6-six'.split()),
+ '3': set(u'7-seven\u2661'.split()),
}
- x = ObjectExpirer({})
+ x = ObjectExpirer(self.conf)
x.swift = InternalClient(containers)
deleted_objects = {}
@@ -162,10 +177,16 @@ class TestObjectExpirer(TestCase):
x.run_once()
self.assertNotEqual(deleted_objects, x.deleted_objects)
deleted_objects = deepcopy(x.deleted_objects)
+ self.assertEqual(containers['3'].pop(),
+ deleted_objects['3'].pop().decode('utf8'))
self.assertEqual(containers, deleted_objects)
+ self.assertEqual(len(set(x.obj_containers_in_order[:4])), 4)
def test_delete_object(self):
class InternalClient(object):
+
+ container_ring = None
+
def __init__(self, test, account, container, obj):
self.test = test
self.account = account
@@ -173,12 +194,6 @@ class TestObjectExpirer(TestCase):
self.obj = obj
self.delete_object_called = False
- def delete_object(self, account, container, obj):
- self.test.assertEqual(self.account, account)
- self.test.assertEqual(self.container, container)
- self.test.assertEqual(self.obj, obj)
- self.delete_object_called = True
-
class DeleteActualObject(object):
def __init__(self, test, actual_obj, timestamp):
self.test = test
@@ -196,48 +211,55 @@ class TestObjectExpirer(TestCase):
actual_obj = 'actual_obj'
timestamp = 'timestamp'
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer({}, logger=self.logger)
x.swift = \
InternalClient(self, x.expiring_objects_account, container, obj)
x.delete_actual_object = \
DeleteActualObject(self, actual_obj, timestamp)
+ delete_object_called = []
+
+ def pop_queue(c, o):
+ self.assertEqual(container, c)
+ self.assertEqual(obj, o)
+ delete_object_called[:] = [True]
+
+ x.pop_queue = pop_queue
+
x.delete_object(actual_obj, timestamp, container, obj)
- self.assertTrue(x.swift.delete_object_called)
+ self.assertTrue(delete_object_called)
self.assertTrue(x.delete_actual_object.called)
def test_report(self):
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer({}, logger=self.logger)
x.report()
- self.assertEqual(x.logger.log_dict['info'], [])
+ self.assertEqual(x.logger.get_lines_for_level('info'), [])
x.logger._clear()
x.report(final=True)
- self.assertTrue('completed' in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
- self.assertTrue('so far' not in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
+ self.assertTrue(
+ 'completed' in str(x.logger.get_lines_for_level('info')))
+ self.assertTrue(
+ 'so far' not in str(x.logger.get_lines_for_level('info')))
x.logger._clear()
x.report_last_time = time() - x.report_interval
x.report()
- self.assertTrue('completed' not in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
- self.assertTrue('so far' in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
+ self.assertTrue(
+ 'completed' not in str(x.logger.get_lines_for_level('info')))
+ self.assertTrue(
+ 'so far' in str(x.logger.get_lines_for_level('info')))
def test_run_once_nothing_to_do(self):
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = 'throw error because a string does not have needed methods'
x.run_once()
- self.assertEqual(x.logger.log_dict['exception'],
- [(("Unhandled exception",), {},
- "'str' object has no attribute "
- "'get_account_info'")])
+ self.assertEqual(x.logger.get_lines_for_level('error'),
+ ["Unhandled exception: "])
+ log_args, log_kwargs = x.logger.log_dict['error'][0]
+ self.assertEqual(str(log_kwargs['exc_info'][1]),
+ "'str' object has no attribute 'get_account_info'")
def test_run_once_calls_report(self):
class InternalClient(object):
@@ -247,15 +269,47 @@ class TestObjectExpirer(TestCase):
def iter_containers(*a, **kw):
return []
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = InternalClient()
x.run_once()
self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
+
+ def test_run_once_unicode_problem(self):
+ class InternalClient(object):
+
+ container_ring = FakeRing()
+
+ def get_account_info(*a, **kw):
+ return 1, 2
+
+ def iter_containers(*a, **kw):
+ return [{'name': u'1234'}]
+
+ def iter_objects(*a, **kw):
+ return [{'name': u'1234-troms\xf8'}]
+
+ def make_request(*a, **kw):
+ pass
+
+ def delete_container(*a, **kw):
+ pass
+
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
+ x.swift = InternalClient()
+
+ requests = []
+
+ def capture_requests(ipaddr, port, method, path, *args, **kwargs):
+ requests.append((method, path))
+
+ with mocked_http_conn(
+ 200, 200, 200, give_connect=capture_requests):
+ x.run_once()
+ self.assertEqual(len(requests), 3)
def test_container_timestamp_break(self):
class InternalClient(object):
@@ -271,28 +325,28 @@ class TestObjectExpirer(TestCase):
def iter_objects(*a, **kw):
raise Exception('This should not have been called')
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer(self.conf,
+ logger=self.logger)
x.swift = InternalClient([{'name': str(int(time() + 86400))}])
x.run_once()
- for exccall in x.logger.log_dict['exception']:
- self.assertTrue(
- 'This should not have been called' not in exccall[0][0])
- self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ logs = x.logger.all_log_lines()
+ self.assertEqual(logs['info'], [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
+ self.assertTrue('error' not in logs)
# Reverse test to be sure it still would blow up the way expected.
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
- x.swift = InternalClient([{'name': str(int(time() - 86400))}])
+ fake_swift = InternalClient([{'name': str(int(time() - 86400))}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.run_once()
self.assertEqual(
- x.logger.log_dict['exception'],
- [(('Unhandled exception',), {},
- str(Exception('This should not have been called')))])
+ x.logger.get_lines_for_level('error'), [
+ 'Unhandled exception: '])
+ log_args, log_kwargs = x.logger.log_dict['error'][-1]
+ self.assertEqual(str(log_kwargs['exc_info'][1]),
+ 'This should not have been called')
def test_object_timestamp_break(self):
class InternalClient(object):
@@ -315,41 +369,36 @@ class TestObjectExpirer(TestCase):
def should_not_be_called(*a, **kw):
raise Exception('This should not have been called')
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
- x.swift = InternalClient(
+ fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % int(time() + 86400)}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.run_once()
- for exccall in x.logger.log_dict['exception']:
- self.assertTrue(
- 'This should not have been called' not in exccall[0][0])
- self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
-
+ self.assertTrue('error' not in x.logger.all_log_lines())
+ self.assertEqual(x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
# Reverse test to be sure it still would blow up the way expected.
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
ts = int(time() - 86400)
- x.swift = InternalClient(
+ fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.delete_actual_object = should_not_be_called
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
self.assertEqual(
- excswhiledeleting,
+ x.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj '
- 'This should not have been called' % (ts, ts)])
+ 'This should not have been called: ' % (ts, ts)])
def test_failed_delete_keeps_entry(self):
class InternalClient(object):
+
+ container_ring = None
+
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
@@ -363,9 +412,6 @@ class TestObjectExpirer(TestCase):
def delete_container(*a, **kw):
pass
- def delete_object(*a, **kw):
- raise Exception('This should not have been called')
-
def iter_objects(self, *a, **kw):
return self.objects
@@ -375,49 +421,48 @@ class TestObjectExpirer(TestCase):
def should_not_get_called(container, obj):
raise Exception('This should not have been called')
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
- x.iter_containers = lambda: [str(int(time() - 86400))]
ts = int(time() - 86400)
- x.delete_actual_object = deliberately_blow_up
- x.swift = InternalClient(
+ fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
+ x.iter_containers = lambda: [str(int(time() - 86400))]
+ x.delete_actual_object = deliberately_blow_up
+ x.pop_queue = should_not_get_called
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
+ error_lines = x.logger.get_lines_for_level('error')
self.assertEqual(
- excswhiledeleting,
+ error_lines,
['Exception while deleting object %d %d-actual-obj '
- 'failed to delete actual object' % (ts, ts)])
+ 'failed to delete actual object: ' % (ts, ts)])
self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
# Reverse test to be sure it still would blow up the way expected.
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
ts = int(time() - 86400)
- x.delete_actual_object = lambda o, t: None
- x.swift = InternalClient(
+ fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
+ self.logger._clear()
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
+ x.delete_actual_object = lambda o, t: None
+ x.pop_queue = should_not_get_called
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
self.assertEqual(
- excswhiledeleting,
+ self.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj This should '
- 'not have been called' % (ts, ts)])
+ 'not have been called: ' % (ts, ts)])
def test_success_gets_counted(self):
class InternalClient(object):
+
+ container_ring = None
+
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
@@ -437,23 +482,27 @@ class TestObjectExpirer(TestCase):
def iter_objects(self, *a, **kw):
return self.objects
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ fake_swift = InternalClient(
+ [{'name': str(int(time() - 86400))}],
+ [{'name': '%d-acc/c/actual-obj' % int(time() - 86400)}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.delete_actual_object = lambda o, t: None
+ x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
- x.swift = InternalClient(
- [{'name': str(int(time() - 86400))}],
- [{'name': '%d-actual-obj' % int(time() - 86400)}])
- x.run_once()
- self.assertEqual(x.report_objects, 1)
- self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 1 objects expired',), {})])
+ with mock.patch('swift.obj.expirer.MAX_OBJECTS_TO_CACHE', 0):
+ x.run_once()
+ self.assertEqual(x.report_objects, 1)
+ self.assertEqual(
+ x.logger.get_lines_for_level('info'),
+ ['Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 1 objects expired'])
def test_delete_actual_object_does_not_get_unicode(self):
class InternalClient(object):
+
+ container_ring = None
+
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
@@ -479,24 +528,28 @@ class TestObjectExpirer(TestCase):
if isinstance(actual_obj, unicode):
got_unicode[0] = True
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
- x.delete_actual_object = delete_actual_object_test_for_unicode
- self.assertEqual(x.report_objects, 0)
- x.swift = InternalClient(
+ fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': u'%d-actual-obj' % int(time() - 86400)}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
+ x.delete_actual_object = delete_actual_object_test_for_unicode
+ x.pop_queue = lambda c, o: None
+ self.assertEqual(x.report_objects, 0)
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 1 objects expired',), {})])
+ x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 1 objects expired',
+ ])
self.assertFalse(got_unicode[0])
def test_failed_delete_continues_on(self):
class InternalClient(object):
+
+ container_ring = None
+
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
@@ -519,8 +572,7 @@ class TestObjectExpirer(TestCase):
def fail_delete_actual_object(actual_obj, timestamp):
raise Exception('failed to delete actual object')
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
cts = int(time() - 86400)
ots = int(time() - 86400)
@@ -538,28 +590,24 @@ class TestObjectExpirer(TestCase):
x.swift = InternalClient(containers, objects)
x.delete_actual_object = fail_delete_actual_object
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
- self.assertEqual(sorted(excswhiledeleting), sorted([
+ error_lines = x.logger.get_lines_for_level('error')
+ self.assertEqual(sorted(error_lines), sorted([
'Exception while deleting object %d %d-actual-obj failed to '
- 'delete actual object' % (cts, ots),
+ 'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-next-obj failed to '
- 'delete actual object' % (cts, ots),
+ 'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-actual-obj failed to '
- 'delete actual object' % (cts + 1, ots),
+ 'delete actual object: ' % (cts + 1, ots),
'Exception while deleting object %d %d-next-obj failed to '
- 'delete actual object' % (cts + 1, ots),
+ 'delete actual object: ' % (cts + 1, ots),
'Exception while deleting container %d failed to delete '
- 'container' % (cts,),
+ 'container: ' % (cts,),
'Exception while deleting container %d failed to delete '
- 'container' % (cts + 1,)]))
- self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ 'container: ' % (cts + 1,)]))
+ self.assertEqual(x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
def test_run_forever_initial_sleep_random(self):
global last_not_sleep
@@ -594,8 +642,7 @@ class TestObjectExpirer(TestCase):
raise Exception('exception %d' % raises[0])
raise SystemExit('exiting exception %d' % raises[0])
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer({}, logger=self.logger)
orig_sleep = expirer.sleep
try:
expirer.sleep = not_sleep
@@ -606,9 +653,11 @@ class TestObjectExpirer(TestCase):
finally:
expirer.sleep = orig_sleep
self.assertEqual(str(err), 'exiting exception 2')
- self.assertEqual(x.logger.log_dict['exception'],
- [(('Unhandled exception',), {},
- 'exception 1')])
+ self.assertEqual(x.logger.get_lines_for_level('error'),
+ ['Unhandled exception: '])
+ log_args, log_kwargs = x.logger.log_dict['error'][0]
+ self.assertEqual(str(log_kwargs['exc_info'][1]),
+ 'exception 1')
def test_delete_actual_object(self):
got_env = [None]
@@ -643,7 +692,7 @@ class TestObjectExpirer(TestCase):
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['PATH_INFO'], '/v1/path/to/object name')
- def test_delete_actual_object_handles_404(self):
+ def test_delete_actual_object_raises_404(self):
def fake_app(env, start_response):
start_response('404 Not Found', [('Content-Length', '0')])
@@ -652,7 +701,8 @@ class TestObjectExpirer(TestCase):
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
- x.delete_actual_object('/path/to/object', '1234')
+ self.assertRaises(internal_client.UnexpectedResponse,
+ x.delete_actual_object, '/path/to/object', '1234')
def test_delete_actual_object_handles_412(self):
@@ -692,10 +742,31 @@ class TestObjectExpirer(TestCase):
x = expirer.ObjectExpirer({})
x.swift.make_request = mock.MagicMock()
x.delete_actual_object(name, timestamp)
- self.assertTrue(x.swift.make_request.called)
+ self.assertEqual(x.swift.make_request.call_count, 1)
self.assertEqual(x.swift.make_request.call_args[0][1],
'/v1/' + urllib.quote(name))
+ def test_pop_queue(self):
+ class InternalClient(object):
+ container_ring = FakeRing()
+ x = expirer.ObjectExpirer({}, logger=self.logger,
+ swift=InternalClient())
+ requests = []
+
+ def capture_requests(ipaddr, port, method, path, *args, **kwargs):
+ requests.append((method, path))
+ with mocked_http_conn(
+ 200, 200, 200, give_connect=capture_requests) as fake_conn:
+ x.pop_queue('c', 'o')
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ for method, path in requests:
+ self.assertEqual(method, 'DELETE')
+ device, part, account, container, obj = utils.split_path(
+ path, 5, 5, True)
+ self.assertEqual(account, '.expiring_objects')
+ self.assertEqual(container, 'c')
+ self.assertEqual(obj, 'o')
+
if __name__ == '__main__':
main()
diff --git a/test/unit/proxy/controllers/test_account.py b/test/unit/proxy/controllers/test_account.py
index 47f76dc..23ad0a1 100644
--- a/test/unit/proxy/controllers/test_account.py
+++ b/test/unit/proxy/controllers/test_account.py
@@ -20,18 +20,21 @@ from swift.common.swob import Request, Response
from swift.common.middleware.acl import format_acl
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_account_info
-from swift.common.constraints import MAX_ACCOUNT_NAME_LENGTH as MAX_ANAME_LEN
+from swift.common import constraints
from test.unit import fake_http_connect, FakeRing, FakeMemcache
+from swift.common.storage_policy import StoragePolicy
from swift.common.request_helpers import get_sys_meta_prefix
import swift.proxy.controllers.base
+from test.unit import patch_policies
+
+@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
def setUp(self):
- self.app = proxy_server.Application(None, FakeMemcache(),
- account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ self.app = proxy_server.Application(
+ None, FakeMemcache(),
+ account_ring=FakeRing(), container_ring=FakeRing())
def test_account_info_in_response_env(self):
controller = proxy_server.AccountController(self.app, 'AUTH_bob')
@@ -79,7 +82,8 @@ class TestAccountController(unittest.TestCase):
self.assertEquals(410, resp.status_int)
def test_long_acct_names(self):
- long_acct_name = '%sLongAccountName' % ('Very' * (MAX_ANAME_LEN // 4))
+ long_acct_name = '%sLongAccountName' % (
+ 'Very' * (constraints.MAX_ACCOUNT_NAME_LENGTH // 4))
controller = proxy_server.AccountController(self.app, long_acct_name)
req = Request.blank('/v1/%s' % long_acct_name)
diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py
index 0c94f90..037e28b 100644
--- a/test/unit/proxy/controllers/test_base.py
+++ b/test/unit/proxy/controllers/test_base.py
@@ -13,87 +13,159 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import itertools
+from collections import defaultdict
import unittest
from mock import patch
from swift.proxy.controllers.base import headers_to_container_info, \
headers_to_account_info, headers_to_object_info, get_container_info, \
get_container_memcache_key, get_account_info, get_account_memcache_key, \
- get_object_env_key, _get_cache_key, get_info, get_object_info, \
- Controller, GetOrHeadHandler
-from swift.common.swob import Request, HTTPException, HeaderKeyDict
+ get_object_env_key, get_info, get_object_info, \
+ Controller, GetOrHeadHandler, _set_info_cache, _set_object_info_cache, \
+ bytes_to_skip
+from swift.common.swob import Request, HTTPException, HeaderKeyDict, \
+ RESPONSE_REASONS
+from swift.common import exceptions
from swift.common.utils import split_path
+from swift.common.http import is_success
+from swift.common.storage_policy import StoragePolicy
from test.unit import fake_http_connect, FakeRing, FakeMemcache
from swift.proxy import server as proxy_server
from swift.common.request_helpers import get_sys_meta_prefix
-
-FakeResponse_status_int = 201
+from test.unit import patch_policies
class FakeResponse(object):
- def __init__(self, headers, env, account, container, obj):
- self.headers = headers
- self.status_int = FakeResponse_status_int
- self.environ = env
- if obj:
- env_key = get_object_env_key(account, container, obj)
- else:
- cache_key, env_key = _get_cache_key(account, container)
- if account and container and obj:
- info = headers_to_object_info(headers, FakeResponse_status_int)
- elif account and container:
- info = headers_to_container_info(headers, FakeResponse_status_int)
- else:
- info = headers_to_account_info(headers, FakeResponse_status_int)
- env[env_key] = info
+ base_headers = {}
+ def __init__(self, status_int=200, headers=None, body=''):
+ self.status_int = status_int
+ self._headers = headers or {}
+ self.body = body
+
+ @property
+ def headers(self):
+ if is_success(self.status_int):
+ self._headers.update(self.base_headers)
+ return self._headers
-class FakeRequest(object):
- def __init__(self, env, path, swift_source=None):
- self.environ = env
- (version, account, container, obj) = split_path(path, 2, 4, True)
- self.account = account
- self.container = container
- self.obj = obj
- if obj:
- stype = 'object'
- self.headers = {'content-length': 5555,
- 'content-type': 'text/plain'}
- else:
- stype = container and 'container' or 'account'
- self.headers = {'x-%s-object-count' % (stype): 1000,
- 'x-%s-bytes-used' % (stype): 6666}
- if swift_source:
- meta = 'x-%s-meta-fakerequest-swift-source' % stype
- self.headers[meta] = swift_source
- def get_response(self, app):
- return FakeResponse(self.headers, self.environ, self.account,
- self.container, self.obj)
+class AccountResponse(FakeResponse):
+ base_headers = {
+ 'x-account-container-count': 333,
+ 'x-account-object-count': 1000,
+ 'x-account-bytes-used': 6666,
+ }
-class FakeCache(object):
- def __init__(self, val):
- self.val = val
- def get(self, *args):
- return self.val
+class ContainerResponse(FakeResponse):
+ base_headers = {
+ 'x-container-object-count': 1000,
+ 'x-container-bytes-used': 6666,
+ }
+
+class ObjectResponse(FakeResponse):
+
+ base_headers = {
+ 'content-length': 5555,
+ 'content-type': 'text/plain'
+ }
+
+
+class DynamicResponseFactory(object):
+
+ def __init__(self, *statuses):
+ if statuses:
+ self.statuses = iter(statuses)
+ else:
+ self.statuses = itertools.repeat(200)
+ self.stats = defaultdict(int)
+
+ response_type = {
+ 'obj': ObjectResponse,
+ 'container': ContainerResponse,
+ 'account': AccountResponse,
+ }
+
+ def _get_response(self, type_):
+ self.stats[type_] += 1
+ class_ = self.response_type[type_]
+ return class_(self.statuses.next())
+
+ def get_response(self, environ):
+ (version, account, container, obj) = split_path(
+ environ['PATH_INFO'], 2, 4, True)
+ if obj:
+ resp = self._get_response('obj')
+ elif container:
+ resp = self._get_response('container')
+ else:
+ resp = self._get_response('account')
+ resp.account = account
+ resp.container = container
+ resp.obj = obj
+ return resp
+
+
+class FakeApp(object):
+
+ recheck_container_existence = 30
+ recheck_account_existence = 30
+
+ def __init__(self, response_factory=None, statuses=None):
+ self.responses = response_factory or \
+ DynamicResponseFactory(*statuses or [])
+ self.sources = []
+
+ def __call__(self, environ, start_response):
+ self.sources.append(environ.get('swift.source'))
+ response = self.responses.get_response(environ)
+ reason = RESPONSE_REASONS[response.status_int][0]
+ start_response('%d %s' % (response.status_int, reason),
+ [(k, v) for k, v in response.headers.items()])
+ # It's a bit strange, but the get_info cache stuff relies on the
+ # app setting some keys in the environment as it makes requests
+ # (in particular GETorHEAD_base) - so our fake does the same
+ _set_info_cache(self, environ, response.account,
+ response.container, response)
+ if response.obj:
+ _set_object_info_cache(self, environ, response.account,
+ response.container, response.obj,
+ response)
+ return iter(response.body)
+
+
+class FakeCache(FakeMemcache):
+ def __init__(self, stub=None, **pre_cached):
+ super(FakeCache, self).__init__()
+ if pre_cached:
+ self.store.update(pre_cached)
+ self.stub = stub
+
+ def get(self, key):
+ return self.stub or self.store.get(key)
+
+
+@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestFuncs(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing)
+ container_ring=FakeRing())
def test_GETorHEAD_base(self):
base = Controller(self.app)
req = Request.blank('/v1/a/c/o/with/slashes')
+ ring = FakeRing()
+ nodes = list(ring.get_part_nodes(0)) + list(ring.get_more_nodes(0))
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'object', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part',
'/a/c/o/with/slashes')
self.assertTrue('swift.object/a/c/o/with/slashes' in resp.environ)
self.assertEqual(
@@ -101,14 +173,14 @@ class TestFuncs(unittest.TestCase):
req = Request.blank('/v1/a/c/o')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'object', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part',
'/a/c/o')
self.assertTrue('swift.object/a/c/o' in resp.environ)
self.assertEqual(resp.environ['swift.object/a/c/o']['status'], 200)
req = Request.blank('/v1/a/c')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'container', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'container', iter(nodes), 'part',
'/a/c')
self.assertTrue('swift.container/a/c' in resp.environ)
self.assertEqual(resp.environ['swift.container/a/c']['status'], 200)
@@ -116,150 +188,166 @@ class TestFuncs(unittest.TestCase):
req = Request.blank('/v1/a')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'account', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'account', iter(nodes), 'part',
'/a')
self.assertTrue('swift.account/a' in resp.environ)
self.assertEqual(resp.environ['swift.account/a']['status'], 200)
def test_get_info(self):
- global FakeResponse_status_int
+ app = FakeApp()
# Do a non cached call to account
env = {}
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- info_a = get_info(None, env, 'a')
+ info_a = get_info(app, env, 'a')
# Check that you got proper info
- self.assertEquals(info_a['status'], 201)
+ self.assertEquals(info_a['status'], 200)
self.assertEquals(info_a['bytes'], 6666)
self.assertEquals(info_a['total_object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
+ # Make sure the app was called
+ self.assertEqual(app.responses.stats['account'], 1)
# Do an env cached call to account
- info_a = get_info(None, env, 'a')
+ info_a = get_info(app, env, 'a')
# Check that you got proper info
- self.assertEquals(info_a['status'], 201)
+ self.assertEquals(info_a['status'], 200)
self.assertEquals(info_a['bytes'], 6666)
self.assertEquals(info_a['total_object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
+ # Make sure the app was NOT called AGAIN
+ self.assertEqual(app.responses.stats['account'], 1)
# This time do env cached call to account and non cached to container
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- info_c = get_info(None, env, 'a', 'c')
+ info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
- self.assertEquals(info_a['status'], 201)
+ self.assertEquals(info_c['status'], 200)
self.assertEquals(info_c['bytes'], 6666)
self.assertEquals(info_c['object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
self.assertEquals(env.get('swift.container/a/c'), info_c)
+ # Make sure the app was called for container
+ self.assertEqual(app.responses.stats['container'], 1)
# This time do a non cached call to account than non cached to
# container
+ app = FakeApp()
env = {} # abandon previous call to env
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- info_c = get_info(None, env, 'a', 'c')
+ info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
- self.assertEquals(info_a['status'], 201)
+ self.assertEquals(info_c['status'], 200)
self.assertEquals(info_c['bytes'], 6666)
self.assertEquals(info_c['object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
self.assertEquals(env.get('swift.container/a/c'), info_c)
+ # check app calls both account and container
+ self.assertEqual(app.responses.stats['account'], 1)
+ self.assertEqual(app.responses.stats['container'], 1)
# This time do an env cached call to container while account is not
# cached
del(env['swift.account/a'])
- info_c = get_info(None, env, 'a', 'c')
+ info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
- self.assertEquals(info_a['status'], 201)
+ self.assertEquals(info_a['status'], 200)
self.assertEquals(info_c['bytes'], 6666)
self.assertEquals(info_c['object_count'], 1000)
# Make sure the env cache is set and account still not cached
self.assertEquals(env.get('swift.container/a/c'), info_c)
+ # no additional calls were made
+ self.assertEqual(app.responses.stats['account'], 1)
+ self.assertEqual(app.responses.stats['container'], 1)
# Do a non cached call to account not found with ret_not_found
+ app = FakeApp(statuses=(404,))
env = {}
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- try:
- FakeResponse_status_int = 404
- info_a = get_info(None, env, 'a', ret_not_found=True)
- finally:
- FakeResponse_status_int = 201
+ info_a = get_info(app, env, 'a', ret_not_found=True)
# Check that you got proper info
self.assertEquals(info_a['status'], 404)
- self.assertEquals(info_a['bytes'], 6666)
- self.assertEquals(info_a['total_object_count'], 1000)
+ self.assertEquals(info_a['bytes'], None)
+ self.assertEquals(info_a['total_object_count'], None)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
+ # and account was called
+ self.assertEqual(app.responses.stats['account'], 1)
# Do a cached call to account not found with ret_not_found
- info_a = get_info(None, env, 'a', ret_not_found=True)
+ info_a = get_info(app, env, 'a', ret_not_found=True)
# Check that you got proper info
self.assertEquals(info_a['status'], 404)
- self.assertEquals(info_a['bytes'], 6666)
- self.assertEquals(info_a['total_object_count'], 1000)
+ self.assertEquals(info_a['bytes'], None)
+ self.assertEquals(info_a['total_object_count'], None)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
+        # and account was NOT called AGAIN
+ self.assertEqual(app.responses.stats['account'], 1)
# Do a non cached call to account not found without ret_not_found
+ app = FakeApp(statuses=(404,))
env = {}
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- try:
- FakeResponse_status_int = 404
- info_a = get_info(None, env, 'a')
- finally:
- FakeResponse_status_int = 201
+ info_a = get_info(app, env, 'a')
# Check that you got proper info
self.assertEquals(info_a, None)
self.assertEquals(env['swift.account/a']['status'], 404)
+ # and account was called
+ self.assertEqual(app.responses.stats['account'], 1)
# Do a cached call to account not found without ret_not_found
info_a = get_info(None, env, 'a')
# Check that you got proper info
self.assertEquals(info_a, None)
self.assertEquals(env['swift.account/a']['status'], 404)
+        # and account was NOT called AGAIN
+ self.assertEqual(app.responses.stats['account'], 1)
def test_get_container_info_swift_source(self):
- req = Request.blank("/v1/a/c", environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_container_info(req.environ, 'app', swift_source='MC')
- self.assertEquals(resp['meta']['fakerequest-swift-source'], 'MC')
+ app = FakeApp()
+ req = Request.blank("/v1/a/c", environ={'swift.cache': FakeCache()})
+ get_container_info(req.environ, app, swift_source='MC')
+ self.assertEqual(app.sources, ['GET_INFO', 'MC'])
def test_get_object_info_swift_source(self):
+ app = FakeApp()
req = Request.blank("/v1/a/c/o",
- environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_object_info(req.environ, 'app', swift_source='LU')
- self.assertEquals(resp['meta']['fakerequest-swift-source'], 'LU')
+ environ={'swift.cache': FakeCache()})
+ get_object_info(req.environ, app, swift_source='LU')
+ self.assertEqual(app.sources, ['LU'])
def test_get_container_info_no_cache(self):
req = Request.blank("/v1/AUTH_account/cont",
environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_container_info(req.environ, 'xxx')
+ resp = get_container_info(req.environ, FakeApp())
+ self.assertEquals(resp['storage_policy'], '0')
self.assertEquals(resp['bytes'], 6666)
self.assertEquals(resp['object_count'], 1000)
+ def test_get_container_info_no_account(self):
+ responses = DynamicResponseFactory(404, 200)
+ app = FakeApp(responses)
+ req = Request.blank("/v1/AUTH_does_not_exist/cont")
+ info = get_container_info(req.environ, app)
+ self.assertEqual(info['status'], 0)
+
+ def test_get_container_info_no_auto_account(self):
+ responses = DynamicResponseFactory(404, 200)
+ app = FakeApp(responses)
+ req = Request.blank("/v1/.system_account/cont")
+ info = get_container_info(req.environ, app)
+ self.assertEqual(info['status'], 200)
+ self.assertEquals(info['bytes'], 6666)
+ self.assertEquals(info['object_count'], 1000)
+
def test_get_container_info_cache(self):
- cached = {'status': 404,
- 'bytes': 3333,
- 'object_count': 10,
- # simplejson sometimes hands back strings, sometimes unicodes
- 'versions': u"\u1F4A9"}
+ cache_stub = {
+ 'status': 404, 'bytes': 3333, 'object_count': 10,
+ # simplejson sometimes hands back strings, sometimes unicodes
+ 'versions': u"\u1F4A9"}
req = Request.blank("/v1/account/cont",
- environ={'swift.cache': FakeCache(cached)})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_container_info(req.environ, 'xxx')
+ environ={'swift.cache': FakeCache(cache_stub)})
+ resp = get_container_info(req.environ, FakeApp())
+ self.assertEquals(resp['storage_policy'], '0')
self.assertEquals(resp['bytes'], 3333)
self.assertEquals(resp['object_count'], 10)
self.assertEquals(resp['status'], 404)
@@ -275,18 +363,16 @@ class TestFuncs(unittest.TestCase):
self.assertEquals(resp['bytes'], 3867)
def test_get_account_info_swift_source(self):
- req = Request.blank("/v1/a", environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_account_info(req.environ, 'a', swift_source='MC')
- self.assertEquals(resp['meta']['fakerequest-swift-source'], 'MC')
+ app = FakeApp()
+ req = Request.blank("/v1/a", environ={'swift.cache': FakeCache()})
+ get_account_info(req.environ, app, swift_source='MC')
+ self.assertEqual(app.sources, ['MC'])
def test_get_account_info_no_cache(self):
+ app = FakeApp()
req = Request.blank("/v1/AUTH_account",
environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_account_info(req.environ, 'xxx')
+ resp = get_account_info(req.environ, app)
self.assertEquals(resp['bytes'], 6666)
self.assertEquals(resp['total_object_count'], 1000)
@@ -297,9 +383,7 @@ class TestFuncs(unittest.TestCase):
'total_object_count': 10}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cached)})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_account_info(req.environ, 'xxx')
+ resp = get_account_info(req.environ, FakeApp())
self.assertEquals(resp['bytes'], 3333)
self.assertEquals(resp['total_object_count'], 10)
self.assertEquals(resp['status'], 404)
@@ -312,9 +396,7 @@ class TestFuncs(unittest.TestCase):
'meta': {}}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cached)})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_account_info(req.environ, 'xxx')
+ resp = get_account_info(req.environ, FakeApp())
self.assertEquals(resp['status'], 404)
self.assertEquals(resp['bytes'], '3333')
self.assertEquals(resp['container_count'], 234)
@@ -344,11 +426,13 @@ class TestFuncs(unittest.TestCase):
self.assertEquals(resp['type'], 'application/json')
def test_get_object_info_no_env(self):
+ app = FakeApp()
req = Request.blank("/v1/account/cont/obj",
environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_object_info(req.environ, 'xxx')
+ resp = get_object_info(req.environ, app)
+ self.assertEqual(app.responses.stats['account'], 0)
+ self.assertEqual(app.responses.stats['container'], 0)
+ self.assertEqual(app.responses.stats['obj'], 1)
self.assertEquals(resp['length'], 5555)
self.assertEquals(resp['type'], 'text/plain')
@@ -443,6 +527,15 @@ class TestFuncs(unittest.TestCase):
self.assertEquals(resp['meta']['whatevs'], 14)
self.assertEquals(resp['meta']['somethingelse'], 0)
+ def test_headers_to_object_info_sys_meta(self):
+ prefix = get_sys_meta_prefix('object')
+ headers = {'%sWhatevs' % prefix: 14,
+ '%ssomethingelse' % prefix: 0}
+ resp = headers_to_object_info(headers.items(), 200)
+ self.assertEquals(len(resp['sysmeta']), 2)
+ self.assertEquals(resp['sysmeta']['whatevs'], 14)
+ self.assertEquals(resp['sysmeta']['somethingelse'], 0)
+
def test_headers_to_object_info_values(self):
headers = {
'content-length': '1024',
@@ -457,7 +550,7 @@ class TestFuncs(unittest.TestCase):
resp,
headers_to_object_info(headers.items(), 200))
- def test_have_quorum(self):
+ def test_base_have_quorum(self):
base = Controller(self.app)
# just throw a bunch of test cases at it
self.assertEqual(base.have_quorum([201, 404], 3), False)
@@ -470,6 +563,32 @@ class TestFuncs(unittest.TestCase):
self.assertEqual(base.have_quorum([404, 404], 2), True)
self.assertEqual(base.have_quorum([201, 404, 201, 201], 4), True)
+ def test_best_response_overrides(self):
+ base = Controller(self.app)
+ responses = [
+ (302, 'Found', '', 'The resource has moved temporarily.'),
+ (100, 'Continue', '', ''),
+ (404, 'Not Found', '', 'Custom body'),
+ ]
+ server_type = "Base DELETE"
+ req = Request.blank('/v1/a/c/o', method='DELETE')
+ statuses, reasons, headers, bodies = zip(*responses)
+
+ # First test that you can't make a quorum with only overridden
+ # responses
+ overrides = {302: 204, 100: 204}
+ resp = base.best_response(req, statuses, reasons, bodies, server_type,
+ headers=headers, overrides=overrides)
+ self.assertEqual(resp.status, '503 Internal Server Error')
+
+ # next make a 404 quorum and make sure the last delete (real) 404
+ # status is the one returned.
+ overrides = {100: 404}
+ resp = base.best_response(req, statuses, reasons, bodies, server_type,
+ headers=headers, overrides=overrides)
+ self.assertEqual(resp.status, '404 Not Found')
+ self.assertEqual(resp.body, 'Custom body')
+
def test_range_fast_forward(self):
req = Request.blank('/')
handler = GetOrHeadHandler(None, req, None, None, None, None, {})
@@ -512,7 +631,8 @@ class TestFuncs(unittest.TestCase):
req = Request.blank('/v1/a/c/o', headers=src_headers)
dst_headers = base.generate_request_headers(req, transfer=True)
expected_headers = {'x-base-meta-owner': '',
- 'x-base-meta-size': '151M'}
+ 'x-base-meta-size': '151M',
+ 'connection': 'close'}
for k, v in expected_headers.iteritems():
self.assertTrue(k in dst_headers)
self.assertEqual(v, dst_headers[k])
@@ -532,3 +652,88 @@ class TestFuncs(unittest.TestCase):
self.assertEqual(v, dst_headers[k.lower()])
for k, v in bad_hdrs.iteritems():
self.assertFalse(k.lower() in dst_headers)
+
+ def test_client_chunk_size(self):
+
+ class TestSource(object):
+ def __init__(self, chunks):
+ self.chunks = list(chunks)
+
+ def read(self, _read_size):
+ if self.chunks:
+ return self.chunks.pop(0)
+ else:
+ return ''
+
+ source = TestSource((
+ 'abcd', '1234', 'abc', 'd1', '234abcd1234abcd1', '2'))
+ req = Request.blank('/v1/a/c/o')
+ node = {}
+ handler = GetOrHeadHandler(self.app, req, None, None, None, None, {},
+ client_chunk_size=8)
+
+ app_iter = handler._make_app_iter(req, node, source)
+ client_chunks = list(app_iter)
+ self.assertEqual(client_chunks, [
+ 'abcd1234', 'abcd1234', 'abcd1234', 'abcd12'])
+
+ def test_client_chunk_size_resuming(self):
+
+ class TestSource(object):
+ def __init__(self, chunks):
+ self.chunks = list(chunks)
+
+ def read(self, _read_size):
+ if self.chunks:
+ chunk = self.chunks.pop(0)
+ if chunk is None:
+ raise exceptions.ChunkReadTimeout()
+ else:
+ return chunk
+ else:
+ return ''
+
+ node = {'ip': '1.2.3.4', 'port': 6000, 'device': 'sda'}
+
+ source1 = TestSource(['abcd', '1234', 'abc', None])
+ source2 = TestSource(['efgh5678'])
+ req = Request.blank('/v1/a/c/o')
+ handler = GetOrHeadHandler(
+ self.app, req, 'Object', None, None, None, {},
+ client_chunk_size=8)
+
+ app_iter = handler._make_app_iter(req, node, source1)
+ with patch.object(handler, '_get_source_and_node',
+ lambda: (source2, node)):
+ client_chunks = list(app_iter)
+ self.assertEqual(client_chunks, ['abcd1234', 'efgh5678'])
+ self.assertEqual(handler.backend_headers['Range'], 'bytes=8-')
+
+ def test_bytes_to_skip(self):
+ # if you start at the beginning, skip nothing
+ self.assertEqual(bytes_to_skip(1024, 0), 0)
+
+ # missed the first 10 bytes, so we've got 1014 bytes of partial
+ # record
+ self.assertEqual(bytes_to_skip(1024, 10), 1014)
+
+ # skipped some whole records first
+ self.assertEqual(bytes_to_skip(1024, 4106), 1014)
+
+ # landed on a record boundary
+ self.assertEqual(bytes_to_skip(1024, 1024), 0)
+ self.assertEqual(bytes_to_skip(1024, 2048), 0)
+
+ # big numbers
+ self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32), 0)
+ self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32 + 1), 2 ** 20 - 1)
+ self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32 + 2 ** 19), 2 ** 19)
+
+ # odd numbers
+ self.assertEqual(bytes_to_skip(123, 0), 0)
+ self.assertEqual(bytes_to_skip(123, 23), 100)
+ self.assertEqual(bytes_to_skip(123, 247), 122)
+
+ # prime numbers
+ self.assertEqual(bytes_to_skip(11, 7), 4)
+ self.assertEqual(bytes_to_skip(97, 7873823), 55)
diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py
index 7c8ecf7..715cd94 100644
--- a/test/unit/proxy/controllers/test_container.py
+++ b/test/unit/proxy/controllers/test_container.py
@@ -16,19 +16,61 @@
import mock
import unittest
+from eventlet import Timeout
+
from swift.common.swob import Request
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_container_info
from test.unit import fake_http_connect, FakeRing, FakeMemcache
+from swift.common.storage_policy import StoragePolicy
from swift.common.request_helpers import get_sys_meta_prefix
+from swift.common import utils
+
+from test.unit import patch_policies, mocked_http_conn, debug_logger
+from test.unit.proxy.test_server import node_error_count
+@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestContainerController(unittest.TestCase):
def setUp(self):
+ # SOF
+ self._orig_hash_suffix = utils.HASH_PATH_SUFFIX
+ self._orig_hash_prefix = utils.HASH_PATH_PREFIX
+ utils.HASH_PATH_SUFFIX = 'endcap'
+ utils.HASH_PATH_PREFIX = ''
+ self.logger = debug_logger()
+ self.container_ring = FakeRing(max_more_nodes=9)
self.app = proxy_server.Application(None, FakeMemcache(),
+ logger=self.logger,
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ container_ring=self.container_ring)
+
+ self.account_info = {
+ 'status': 200,
+ 'container_count': '10',
+ 'total_object_count': '100',
+ 'bytes': '1000',
+ 'meta': {},
+ 'sysmeta': {},
+ }
+
+ class FakeAccountInfoContainerController(
+ proxy_server.ContainerController):
+
+ def account_info(controller, *args, **kwargs):
+ patch_path = 'swift.proxy.controllers.base.get_info'
+ with mock.patch(patch_path) as mock_get_info:
+ mock_get_info.return_value = dict(self.account_info)
+ return super(FakeAccountInfoContainerController,
+ controller).account_info(
+ *args, **kwargs)
+ _orig_get_controller = self.app.get_controller
+
+ def wrapped_get_controller(*args, **kwargs):
+ with mock.patch('swift.proxy.server.ContainerController',
+ new=FakeAccountInfoContainerController):
+ return _orig_get_controller(*args, **kwargs)
+ self.app.get_controller = wrapped_get_controller
def test_container_info_in_response_env(self):
controller = proxy_server.ContainerController(self.app, 'a', 'c')
@@ -118,6 +160,53 @@ class TestContainerController(unittest.TestCase):
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
+ def test_node_errors(self):
+ self.app.sort_nodes = lambda n: n
+
+ for method in ('PUT', 'DELETE', 'POST'):
+ def test_status_map(statuses, expected):
+ self.app._error_limiting = {}
+ req = Request.blank('/v1/a/c', method=method)
+ with mocked_http_conn(*statuses) as fake_conn:
+ print 'a' * 50
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, expected)
+ for req in fake_conn.requests:
+ self.assertEqual(req['method'], method)
+ self.assert_(req['path'].endswith('/a/c'))
+
+ base_status = [201] * 3
+ # test happy path
+ test_status_map(list(base_status), 201)
+ for i in range(3):
+ self.assertEqual(node_error_count(
+ self.app, self.container_ring.devs[i]), 0)
+ # single node errors and test isolation
+ for i in range(3):
+ status_list = list(base_status)
+ status_list[i] = 503
+ status_list.append(201)
+ test_status_map(status_list, 201)
+ for j in range(3):
+ expected = 1 if j == i else 0
+ self.assertEqual(node_error_count(
+ self.app, self.container_ring.devs[j]), expected)
+ # timeout
+ test_status_map((201, Timeout(), 201, 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, self.container_ring.devs[1]), 1)
+
+ # exception
+ test_status_map((Exception('kaboom!'), 201, 201, 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, self.container_ring.devs[0]), 1)
+
+ # insufficient storage
+ test_status_map((201, 201, 507, 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, self.container_ring.devs[2]),
+ self.app.error_suppression_limit + 1)
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py
index 4942691..a38e753 100755
--- a/test/unit/proxy/controllers/test_obj.py
+++ b/test/unit/proxy/controllers/test_obj.py
@@ -14,185 +14,1397 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import email.parser
+import itertools
+import random
+import time
import unittest
+from collections import defaultdict
from contextlib import contextmanager
+import json
+from hashlib import md5
import mock
+from eventlet import Timeout
import swift
+from swift.common import utils, swob
from swift.proxy import server as proxy_server
-from swift.common.swob import HTTPException
-from test.unit import FakeRing, FakeMemcache, fake_http_connect, debug_logger
+from swift.proxy.controllers import obj
+from swift.proxy.controllers.base import get_info as _real_get_info
+from swift.common.storage_policy import POLICIES, ECDriverError
+
+from test.unit import FakeRing, FakeMemcache, fake_http_connect, \
+ debug_logger, patch_policies, SlowBody
+from test.unit.proxy.test_server import node_error_count
+
+
+def unchunk_body(chunked_body):
+ body = ''
+ remaining = chunked_body
+ while remaining:
+ hex_length, remaining = remaining.split('\r\n', 1)
+ length = int(hex_length, 16)
+ body += remaining[:length]
+ remaining = remaining[length + 2:]
+ return body
@contextmanager
def set_http_connect(*args, **kwargs):
old_connect = swift.proxy.controllers.base.http_connect
new_connect = fake_http_connect(*args, **kwargs)
- swift.proxy.controllers.base.http_connect = new_connect
- swift.proxy.controllers.obj.http_connect = new_connect
- swift.proxy.controllers.account.http_connect = new_connect
- swift.proxy.controllers.container.http_connect = new_connect
- yield new_connect
- swift.proxy.controllers.base.http_connect = old_connect
- swift.proxy.controllers.obj.http_connect = old_connect
- swift.proxy.controllers.account.http_connect = old_connect
- swift.proxy.controllers.container.http_connect = old_connect
-
-
-class TestObjControllerWriteAffinity(unittest.TestCase):
+ try:
+ swift.proxy.controllers.base.http_connect = new_connect
+ swift.proxy.controllers.obj.http_connect = new_connect
+ swift.proxy.controllers.account.http_connect = new_connect
+ swift.proxy.controllers.container.http_connect = new_connect
+ yield new_connect
+ left_over_status = list(new_connect.code_iter)
+ if left_over_status:
+ raise AssertionError('left over status %r' % left_over_status)
+ finally:
+ swift.proxy.controllers.base.http_connect = old_connect
+ swift.proxy.controllers.obj.http_connect = old_connect
+ swift.proxy.controllers.account.http_connect = old_connect
+ swift.proxy.controllers.container.http_connect = old_connect
+
+
+class PatchedObjControllerApp(proxy_server.Application):
+ """
+ This patch is just a hook over the proxy server's __call__ to ensure
+ that calls to get_info will return the stubbed value for
+ container_info if it's a container info call.
+ """
+
+ container_info = {}
+ per_container_info = {}
+
+ def __call__(self, *args, **kwargs):
+
+ def _fake_get_info(app, env, account, container=None, **kwargs):
+ if container:
+ if container in self.per_container_info:
+ return self.per_container_info[container]
+ return self.container_info
+ else:
+ return _real_get_info(app, env, account, container, **kwargs)
+
+ mock_path = 'swift.proxy.controllers.base.get_info'
+ with mock.patch(mock_path, new=_fake_get_info):
+ return super(
+ PatchedObjControllerApp, self).__call__(*args, **kwargs)
+
+
+class BaseObjectControllerMixin(object):
+ container_info = {
+ 'write_acl': None,
+ 'read_acl': None,
+ 'storage_policy': None,
+ 'sync_key': None,
+ 'versions': None,
+ }
+
+ # this needs to be set on the test case
+ controller_cls = None
+
def setUp(self):
- self.app = proxy_server.Application(
+ # setup fake rings with handoffs
+ for policy in POLICIES:
+ policy.object_ring.max_more_nodes = policy.object_ring.replicas
+
+ self.logger = debug_logger('proxy-server')
+ self.logger.thread_locals = ('txn1', '127.0.0.2')
+ self.app = PatchedObjControllerApp(
None, FakeMemcache(), account_ring=FakeRing(),
- container_ring=FakeRing(), object_ring=FakeRing(max_more_nodes=9))
- self.app.request_node_count = lambda replicas: 10000000
- self.app.sort_nodes = lambda l: l # stop shuffling the primary nodes
+ container_ring=FakeRing(), logger=self.logger)
+ # you can over-ride the container_info just by setting it on the app
+ self.app.container_info = dict(self.container_info)
+ # default policy and ring references
+ self.policy = POLICIES.default
+ self.obj_ring = self.policy.object_ring
+ self._ts_iter = (utils.Timestamp(t) for t in
+ itertools.count(int(time.time())))
+
+ def ts(self):
+ return self._ts_iter.next()
+
+ def replicas(self, policy=None):
+ policy = policy or POLICIES.default
+ return policy.object_ring.replicas
+
+ def quorum(self, policy=None):
+ policy = policy or POLICIES.default
+ return policy.quorum
def test_iter_nodes_local_first_noops_when_no_affinity(self):
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ # this test needs a stable node order - most don't
+ self.app.sort_nodes = lambda l: l
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
self.app.write_affinity_is_local_fn = None
-
- all_nodes = self.app.object_ring.get_part_nodes(1)
- all_nodes.extend(self.app.object_ring.get_more_nodes(1))
+ object_ring = self.app.get_object_ring(None)
+ all_nodes = object_ring.get_part_nodes(1)
+ all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
- self.app.object_ring, 1))
+ object_ring, 1))
self.maxDiff = None
self.assertEqual(all_nodes, local_first_nodes)
def test_iter_nodes_local_first_moves_locals_first(self):
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
self.app.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
- self.app.write_affinity_node_count = lambda ring: 4
+ # we'll write to one more than replica count local nodes
+ self.app.write_affinity_node_count = lambda r: r + 1
+
+ object_ring = self.app.get_object_ring(None)
+ # make our fake ring have plenty of nodes, and not get limited
+ # artificially by the proxy max request node count
+ object_ring.max_more_nodes = 100000
+ self.app.request_node_count = lambda r: 100000
- all_nodes = self.app.object_ring.get_part_nodes(1)
- all_nodes.extend(self.app.object_ring.get_more_nodes(1))
+ all_nodes = object_ring.get_part_nodes(1)
+ all_nodes.extend(object_ring.get_more_nodes(1))
+ # i guess fake_ring wants the get_more_nodes iter to more safely be
+ # converted to a list with a smallish sort of limit which *can* be
+ # lower than max_more_nodes
+ fake_rings_real_max_more_nodes_value = object_ring.replicas ** 2
+ self.assertEqual(len(all_nodes), fake_rings_real_max_more_nodes_value)
+
+ # make sure we have enough local nodes (sanity)
+ all_local_nodes = [n for n in all_nodes if
+ self.app.write_affinity_is_local_fn(n)]
+ self.assertTrue(len(all_local_nodes) >= self.replicas() + 1)
+
+ # finally, create the local_first_nodes iter and flatten it out
local_first_nodes = list(controller.iter_nodes_local_first(
- self.app.object_ring, 1))
+ object_ring, 1))
# the local nodes move up in the ordering
- self.assertEqual([1, 1, 1, 1],
- [node['region'] for node in local_first_nodes[:4]])
+ self.assertEqual([1] * (self.replicas() + 1), [
+ node['region'] for node in local_first_nodes[
+ :self.replicas() + 1]])
# we don't skip any nodes
+ self.assertEqual(len(all_nodes), len(local_first_nodes))
+ self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
+
+ def test_iter_nodes_local_first_best_effort(self):
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
+ self.app.write_affinity_is_local_fn = (
+ lambda node: node['region'] == 1)
+
+ object_ring = self.app.get_object_ring(None)
+ all_nodes = object_ring.get_part_nodes(1)
+ all_nodes.extend(object_ring.get_more_nodes(1))
+
+ local_first_nodes = list(controller.iter_nodes_local_first(
+ object_ring, 1))
+
+ # we won't have quite enough local nodes...
+ self.assertEqual(len(all_nodes), self.replicas() +
+ POLICIES.default.object_ring.max_more_nodes)
+ all_local_nodes = [n for n in all_nodes if
+ self.app.write_affinity_is_local_fn(n)]
+ self.assertEqual(len(all_local_nodes), self.replicas())
+ # but the local nodes we do have are at the front of the local iter
+ first_n_local_first_nodes = local_first_nodes[:len(all_local_nodes)]
+ self.assertEqual(sorted(all_local_nodes),
+ sorted(first_n_local_first_nodes))
+ # but we *still* don't *skip* any nodes
+ self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
def test_connect_put_node_timeout(self):
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
- self.app.conn_timeout = 0.1
- with set_http_connect(200, slow_connect=True):
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
+ self.app.conn_timeout = 0.05
+ with set_http_connect(slow_connect=True):
nodes = [dict(ip='', port='', device='')]
res = controller._connect_put_node(nodes, '', '', {}, ('', ''))
self.assertTrue(res is None)
+ def test_DELETE_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [204] * self.replicas()
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+
+ def test_DELETE_missing_one(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [404] + [204] * (self.replicas() - 1)
+ random.shuffle(codes)
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
-class TestObjController(unittest.TestCase):
- def setUp(self):
- logger = debug_logger('proxy-server')
- logger.thread_locals = ('txn1', '127.0.0.2')
- self.app = proxy_server.Application(
- None, FakeMemcache(), account_ring=FakeRing(),
- container_ring=FakeRing(), object_ring=FakeRing(),
- logger=logger)
- self.controller = proxy_server.ObjectController(self.app,
- 'a', 'c', 'o')
- self.controller.container_info = mock.MagicMock(return_value={
- 'partition': 1,
- 'nodes': [
- {'ip': '127.0.0.1', 'port': '1', 'device': 'sda'},
- {'ip': '127.0.0.1', 'port': '2', 'device': 'sda'},
- {'ip': '127.0.0.1', 'port': '3', 'device': 'sda'},
- ],
- 'write_acl': None,
- 'read_acl': None,
- 'sync_key': None,
- 'versions': None})
+ def test_DELETE_not_found(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [404] * (self.replicas() - 1) + [204]
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 404)
+
+ def test_DELETE_mostly_found(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ mostly_204s = [204] * self.quorum()
+ codes = mostly_204s + [404] * (self.replicas() - len(mostly_204s))
+ self.assertEqual(len(codes), self.replicas())
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+
+ def test_DELETE_mostly_not_found(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ mostly_404s = [404] * self.quorum()
+ codes = mostly_404s + [204] * (self.replicas() - len(mostly_404s))
+ self.assertEqual(len(codes), self.replicas())
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 404)
+
+ def test_DELETE_half_not_found_statuses(self):
+ self.obj_ring.set_replicas(4)
+
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ with set_http_connect(404, 204, 404, 204):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+
+ def test_DELETE_half_not_found_headers_and_body(self):
+ # Transformed responses have bogus bodies and headers, so make sure we
+ # send the client headers and body from a real node's response.
+ self.obj_ring.set_replicas(4)
+
+ status_codes = (404, 404, 204, 204)
+ bodies = ('not found', 'not found', '', '')
+ headers = [{}, {}, {'Pick-Me': 'yes'}, {'Pick-Me': 'yes'}]
+
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ with set_http_connect(*status_codes, body_iter=bodies,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+ self.assertEquals(resp.headers.get('Pick-Me'), 'yes')
+ self.assertEquals(resp.body, '')
+
+ def test_DELETE_handoff(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [204] * self.replicas()
+ with set_http_connect(507, *codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+
+ def test_POST_non_int_delete_after(self):
+ t = str(int(time.time() + 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': t})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-After', resp.body)
+
+ def test_PUT_non_int_delete_after(self):
+ t = str(int(time.time() + 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': t})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-After', resp.body)
+
+ def test_POST_negative_delete_after(self):
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': '-60'})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-After in past', resp.body)
+
+ def test_PUT_negative_delete_after(self):
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': '-60'})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-After in past', resp.body)
+
+ def test_POST_delete_at_non_integer(self):
+ t = str(int(time.time() + 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-At', resp.body)
+
+ def test_PUT_delete_at_non_integer(self):
+ t = str(int(time.time() - 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-At', resp.body)
+
+ def test_POST_delete_at_in_past(self):
+ t = str(int(time.time() - 100))
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-At in past', resp.body)
+
+ def test_PUT_delete_at_in_past(self):
+ t = str(int(time.time() - 100))
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-At in past', resp.body)
+
+ def test_HEAD_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
+ with set_http_connect(200):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_HEAD_x_newest(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ with set_http_connect(200, 200, 200):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_HEAD_x_newest_different_timestamps(self):
+ req = swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ timestamps = [next(ts) for i in range(3)]
+ newest_timestamp = timestamps[-1]
+ random.shuffle(timestamps)
+ backend_response_headers = [{
+ 'X-Backend-Timestamp': t.internal,
+ 'X-Timestamp': t.normal
+ } for t in timestamps]
+ with set_http_connect(200, 200, 200,
+ headers=backend_response_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.headers['x-timestamp'], newest_timestamp.normal)
+
+ def test_HEAD_x_newest_with_two_vector_timestamps(self):
+ req = swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ ts = (utils.Timestamp(time.time(), offset=offset)
+ for offset in itertools.count())
+ timestamps = [next(ts) for i in range(3)]
+ newest_timestamp = timestamps[-1]
+ random.shuffle(timestamps)
+ backend_response_headers = [{
+ 'X-Backend-Timestamp': t.internal,
+ 'X-Timestamp': t.normal
+ } for t in timestamps]
+ with set_http_connect(200, 200, 200,
+ headers=backend_response_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.headers['x-backend-timestamp'],
+ newest_timestamp.internal)
+
+ def test_HEAD_x_newest_with_some_missing(self):
+ req = swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ request_count = self.app.request_node_count(self.obj_ring.replicas)
+ backend_response_headers = [{
+ 'x-timestamp': next(ts).normal,
+ } for i in range(request_count)]
+ responses = [404] * (request_count - 1)
+ responses.append(200)
+ request_log = []
+
+ def capture_requests(ip, port, device, part, method, path,
+ headers=None, **kwargs):
+ req = {
+ 'ip': ip,
+ 'port': port,
+ 'device': device,
+ 'part': part,
+ 'method': method,
+ 'path': path,
+ 'headers': headers,
+ }
+ request_log.append(req)
+ with set_http_connect(*responses,
+ headers=backend_response_headers,
+ give_connect=capture_requests):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ for req in request_log:
+ self.assertEqual(req['method'], 'HEAD')
+ self.assertEqual(req['path'], '/a/c/o')
+
+ def test_container_sync_delete(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='DELETE', headers={
+ 'X-Timestamp': ts.next().internal})
+ codes = [409] * self.obj_ring.replicas
+ ts_iter = itertools.repeat(ts.next().internal)
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 409)
+
+ def test_PUT_requires_length(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 411)
+
+# end of BaseObjectControllerMixin
+
+
+@patch_policies()
+class TestReplicatedObjController(BaseObjectControllerMixin,
+ unittest.TestCase):
+
+ controller_cls = obj.ReplicatedObjectController
def test_PUT_simple(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o')
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
- resp = self.controller.PUT(req)
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
def test_PUT_if_none_match(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o')
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
- resp = self.controller.PUT(req)
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
def test_PUT_if_none_match_denied(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o')
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
- with set_http_connect(201, (412, 412), 201):
- resp = self.controller.PUT(req)
+ with set_http_connect(201, 412, 201):
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 412)
def test_PUT_if_none_match_not_star(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o')
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = 'somethingelse'
req.headers['content-length'] = '0'
- with set_http_connect(201, 201, 201):
- resp = self.controller.PUT(req)
+ with set_http_connect():
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 400)
+ def test_PUT_connect_exceptions(self):
+ object_ring = self.app.get_object_ring(None)
+ self.app.sort_nodes = lambda n: n # disable shuffle
+
+ def test_status_map(statuses, expected):
+ self.app._error_limiting = {}
+ req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
+ body='test body')
+ with set_http_connect(*statuses):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, expected)
+
+ base_status = [201] * 3
+ # test happy path
+ test_status_map(list(base_status), 201)
+ for i in range(3):
+ self.assertEqual(node_error_count(
+ self.app, object_ring.devs[i]), 0)
+ # single node errors and test isolation
+ for i in range(3):
+ status_list = list(base_status)
+ status_list[i] = 503
+ test_status_map(status_list, 201)
+ for j in range(3):
+ self.assertEqual(node_error_count(
+ self.app, object_ring.devs[j]), 1 if j == i else 0)
+ # connect errors
+ test_status_map((201, Timeout(), 201, 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, object_ring.devs[1]), 1)
+ test_status_map((Exception('kaboom!'), 201, 201, 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, object_ring.devs[0]), 1)
+ # expect errors
+ test_status_map((201, 201, (503, None), 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, object_ring.devs[2]), 1)
+ test_status_map(((507, None), 201, 201, 201), 201)
+ self.assertEqual(
+ node_error_count(self.app, object_ring.devs[0]),
+ self.app.error_suppression_limit + 1)
+ # response errors
+ test_status_map(((100, Timeout()), 201, 201), 201)
+ self.assertEqual(
+ node_error_count(self.app, object_ring.devs[0]), 1)
+ test_status_map((201, 201, (100, Exception())), 201)
+ self.assertEqual(
+ node_error_count(self.app, object_ring.devs[2]), 1)
+ test_status_map((201, (100, 507), 201), 201)
+ self.assertEqual(
+ node_error_count(self.app, object_ring.devs[1]),
+ self.app.error_suppression_limit + 1)
+
def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200):
- resp = self.controller.GET(req)
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
- def test_DELETE_simple(self):
+ def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
- with set_http_connect(204, 204, 204):
- resp = self.controller.DELETE(req)
- self.assertEquals(resp.status_int, 204)
+ with set_http_connect(503, 200):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_GET_handoff(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o')
+ codes = [503] * self.obj_ring.replicas + [200]
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
- def test_POST_simple(self):
+ def test_GET_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
- with set_http_connect(200, 200, 200, 201, 201, 201):
- resp = self.controller.POST(req)
+ codes = [404] * (self.obj_ring.replicas +
+ self.obj_ring.max_more_nodes)
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 404)
+
+ def test_POST_as_COPY_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST')
+ head_resp = [200] * self.obj_ring.replicas + \
+ [404] * self.obj_ring.max_more_nodes
+ put_resp = [201] * self.obj_ring.replicas
+ codes = head_resp + put_resp
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 202)
+ def test_POST_delete_at(self):
+ t = str(int(time.time() + 100))
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ post_headers = []
+
+ def capture_headers(ip, port, device, part, method, path, headers,
+ **kwargs):
+ if method == 'POST':
+ post_headers.append(headers)
+ x_newest_responses = [200] * self.obj_ring.replicas + \
+ [404] * self.obj_ring.max_more_nodes
+ post_resp = [200] * self.obj_ring.replicas
+ codes = x_newest_responses + post_resp
+ with set_http_connect(*codes, give_connect=capture_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+ for given_headers in post_headers:
+ self.assertEquals(given_headers.get('X-Delete-At'), t)
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
+
+ def test_PUT_delete_at(self):
+ t = str(int(time.time() + 100))
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ put_headers = []
+
+ def capture_headers(ip, port, device, part, method, path, headers,
+ **kwargs):
+ if method == 'PUT':
+ put_headers.append(headers)
+ codes = [201] * self.obj_ring.replicas
+ with set_http_connect(*codes, give_connect=capture_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+ for given_headers in put_headers:
+ self.assertEquals(given_headers.get('X-Delete-At'), t)
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
+
+ def test_PUT_converts_delete_after_to_delete_at(self):
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': '60'})
+ put_headers = []
+
+ def capture_headers(ip, port, device, part, method, path, headers,
+ **kwargs):
+ if method == 'PUT':
+ put_headers.append(headers)
+ codes = [201] * self.obj_ring.replicas
+ t = time.time()
+ with set_http_connect(*codes, give_connect=capture_headers):
+ with mock.patch('time.time', lambda: t):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+ expected_delete_at = str(int(t) + 60)
+ for given_headers in put_headers:
+ self.assertEquals(given_headers.get('X-Delete-At'),
+ expected_delete_at)
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
+
+ def test_container_sync_put_x_timestamp_not_found(self):
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ self.app.container_info['storage_policy'] = policy_index
+ put_timestamp = utils.Timestamp(time.time()).normal
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': put_timestamp})
+ codes = [201] * self.obj_ring.replicas
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+
+ def test_container_sync_put_x_timestamp_match(self):
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ self.app.container_info['storage_policy'] = policy_index
+ put_timestamp = utils.Timestamp(time.time()).normal
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': put_timestamp})
+ ts_iter = itertools.repeat(put_timestamp)
+ codes = [409] * self.obj_ring.replicas
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_container_sync_put_x_timestamp_older(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ self.app.container_info['storage_policy'] = policy_index
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': ts.next().internal})
+ ts_iter = itertools.repeat(ts.next().internal)
+ codes = [409] * self.obj_ring.replicas
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_container_sync_put_x_timestamp_newer(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ orig_timestamp = ts.next().internal
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': ts.next().internal})
+ ts_iter = itertools.repeat(orig_timestamp)
+ codes = [201] * self.obj_ring.replicas
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+
+ def test_put_x_timestamp_conflict(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': ts.next().internal})
+ ts_iter = iter([ts.next().internal, None, None])
+ codes = [409] + [201] * (self.obj_ring.replicas - 1)
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_container_sync_put_x_timestamp_race(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ put_timestamp = ts.next().internal
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': put_timestamp})
+
+            # object nodes respond 409 because another in-flight request
+            # finished, so the on-disk timestamp now equals the request's.
+ put_ts = [put_timestamp] * self.obj_ring.replicas
+ codes = [409] * self.obj_ring.replicas
+
+ ts_iter = iter(put_ts)
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_container_sync_put_x_timestamp_unsynced_race(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ put_timestamp = ts.next().internal
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': put_timestamp})
+
+ # only one in-flight request finished
+ put_ts = [None] * (self.obj_ring.replicas - 1)
+ put_resp = [201] * (self.obj_ring.replicas - 1)
+ put_ts += [put_timestamp]
+ put_resp += [409]
+
+ ts_iter = iter(put_ts)
+ codes = put_resp
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
def test_COPY_simple(self):
+ req = swift.common.swob.Request.blank(
+ '/v1/a/c/o', method='COPY',
+ headers={'Content-Length': 0,
+ 'Destination': 'c/o-copy'})
+ head_resp = [200] * self.obj_ring.replicas + \
+ [404] * self.obj_ring.max_more_nodes
+ put_resp = [201] * self.obj_ring.replicas
+ codes = head_resp + put_resp
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_log_info(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
+ req.headers['x-copy-from'] = 'some/where'
+ req.headers['Content-Length'] = 0
+ # override FakeConn default resp headers to keep log_info clean
+ resp_headers = {'x-delete-at': None}
+ head_resp = [200] * self.obj_ring.replicas + \
+ [404] * self.obj_ring.max_more_nodes
+ put_resp = [201] * self.obj_ring.replicas
+ codes = head_resp + put_resp
+ with set_http_connect(*codes, headers=resp_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+ self.assertEquals(
+ req.environ.get('swift.log_info'), ['x-copy-from:some/where'])
+ # and then check that we don't do that for originating POSTs
req = swift.common.swob.Request.blank('/v1/a/c/o')
- with set_http_connect(200, 200, 200, 201, 201, 201):
- resp = self.controller.POST(req)
- self.assertEquals(resp.status_int, 202)
+ req.method = 'POST'
+ req.headers['x-copy-from'] = 'else/where'
+ with set_http_connect(*codes, headers=resp_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+ self.assertEquals(req.environ.get('swift.log_info'), None)
- def test_HEAD_simple(self):
+
+@patch_policies(legacy_only=True)
+class TestObjControllerLegacyCache(TestReplicatedObjController):
+ """
+ This test pretends like memcache returned a stored value that should
+ resemble whatever "old" format. It catches KeyErrors you'd get if your
+ code was expecting some new format during a rolling upgrade.
+ """
+
+ # in this case policy_index is missing
+ container_info = {
+ 'read_acl': None,
+ 'write_acl': None,
+ 'sync_key': None,
+ 'versions': None,
+ }
+
+ def test_invalid_storage_policy_cache(self):
+ self.app.container_info['storage_policy'] = 1
+ for method in ('GET', 'HEAD', 'POST', 'PUT', 'COPY'):
+ req = swob.Request.blank('/v1/a/c/o', method=method)
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 503)
+
+
+@patch_policies(with_ec_default=True)
+class TestECObjController(BaseObjectControllerMixin, unittest.TestCase):
+ container_info = {
+ 'read_acl': None,
+ 'write_acl': None,
+ 'sync_key': None,
+ 'versions': None,
+ 'storage_policy': '0',
+ }
+
+ controller_cls = obj.ECObjectController
+
+ def test_determine_chunk_destinations(self):
+ class FakePutter(object):
+ def __init__(self, index):
+ self.node_index = index
+
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
+
+ # create a dummy list of putters, check no handoffs
+ putters = []
+ for index in range(0, 4):
+ putters.append(FakePutter(index))
+ got = controller._determine_chunk_destinations(putters)
+ expected = {}
+ for i, p in enumerate(putters):
+ expected[p] = i
+ self.assertEquals(got, expected)
+
+        # now let's make a handoff at the end
+ putters[3].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+ putters[3].node_index = 3
+
+        # now let's make a handoff at the start
+ putters[0].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+ putters[0].node_index = 0
+
+        # now let's make a handoff in the middle
+ putters[2].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+ putters[2].node_index = 0
+
+        # now let's make all of them handoffs
+ for index in range(0, 4):
+ putters[index].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+
+ def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
- with set_http_connect(200, 200, 200, 201, 201, 201):
- resp = self.controller.POST(req)
- self.assertEquals(resp.status_int, 202)
+ get_resp = [200] * self.policy.ec_ndata
+ with set_http_connect(*get_resp):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
- def test_PUT_log_info(self):
- # mock out enough to get to the area of the code we want to test
- with mock.patch('swift.proxy.controllers.obj.check_object_creation',
- mock.MagicMock(return_value=None)):
- req = swift.common.swob.Request.blank('/v1/a/c/o')
- req.headers['x-copy-from'] = 'somewhere'
- try:
- self.controller.PUT(req)
- except HTTPException:
- pass
- self.assertEquals(
- req.environ.get('swift.log_info'), ['x-copy-from:somewhere'])
- # and then check that we don't do that for originating POSTs
- req = swift.common.swob.Request.blank('/v1/a/c/o')
- req.method = 'POST'
- req.headers['x-copy-from'] = 'elsewhere'
- try:
- self.controller.PUT(req)
- except HTTPException:
- pass
- self.assertEquals(req.environ.get('swift.log_info'), None)
+ def test_GET_simple_x_newest(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o',
+ headers={'X-Newest': 'true'})
+ codes = [200] * self.replicas()
+ codes += [404] * self.obj_ring.max_more_nodes
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_GET_error(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o')
+ get_resp = [503] + [200] * self.policy.ec_ndata
+ with set_http_connect(*get_resp):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_GET_with_body(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o')
+ # turn a real body into fragments
+ segment_size = self.policy.ec_segment_size
+ real_body = ('asdf' * segment_size)[:-10]
+ # split it up into chunks
+ chunks = [real_body[x:x + segment_size]
+ for x in range(0, len(real_body), segment_size)]
+ fragment_payloads = []
+ for chunk in chunks:
+ fragments = self.policy.pyeclib_driver.encode(chunk)
+ if not fragments:
+ break
+ fragment_payloads.append(fragments)
+ # sanity
+ sanity_body = ''
+ for fragment_payload in fragment_payloads:
+ sanity_body += self.policy.pyeclib_driver.decode(
+ fragment_payload)
+ self.assertEqual(len(real_body), len(sanity_body))
+ self.assertEqual(real_body, sanity_body)
+
+ node_fragments = zip(*fragment_payloads)
+ self.assertEqual(len(node_fragments), self.replicas()) # sanity
+ responses = [(200, ''.join(node_fragments[i]), {})
+ for i in range(POLICIES.default.ec_ndata)]
+ status_codes, body_iter, headers = zip(*responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+ self.assertEqual(len(real_body), len(resp.body))
+ self.assertEqual(real_body, resp.body)
+
+ def test_PUT_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_with_explicit_commit_status(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [(100, 100, 201)] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_error(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [503] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 503)
+
+ def test_PUT_mostly_success(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * self.quorum()
+ codes += [503] * (self.replicas() - len(codes))
+ random.shuffle(codes)
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_error_commit(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [(100, 503, Exception('not used'))] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 503)
+
+ def test_PUT_mostly_success_commit(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * self.quorum()
+ codes += [(100, 503, Exception('not used'))] * (
+ self.replicas() - len(codes))
+ random.shuffle(codes)
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_mostly_error_commit(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [(100, 503, Exception('not used'))] * self.quorum()
+ codes += [201] * (self.replicas() - len(codes))
+ random.shuffle(codes)
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 503)
+
+ def test_PUT_commit_timeout(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * (self.replicas() - 1)
+ codes.append((100, Timeout(), Exception('not used')))
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_commit_exception(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * (self.replicas() - 1)
+ codes.append((100, Exception('kaboom!'), Exception('not used')))
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_with_body(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
+ segment_size = self.policy.ec_segment_size
+ test_body = ('asdf' * segment_size)[:-10]
+ etag = md5(test_body).hexdigest()
+ size = len(test_body)
+ req.body = test_body
+ codes = [201] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+
+ put_requests = defaultdict(lambda: {'boundary': None, 'chunks': []})
+
+ def capture_body(conn_id, chunk):
+ put_requests[conn_id]['chunks'].append(chunk)
+
+ def capture_headers(ip, port, device, part, method, path, headers,
+ **kwargs):
+ conn_id = kwargs['connection_id']
+ put_requests[conn_id]['boundary'] = headers[
+ 'X-Backend-Obj-Multipart-Mime-Boundary']
+
+ with set_http_connect(*codes, expect_headers=expect_headers,
+ give_send=capture_body,
+ give_connect=capture_headers):
+ resp = req.get_response(self.app)
+
+ self.assertEquals(resp.status_int, 201)
+ frag_archives = []
+ for connection_id, info in put_requests.items():
+ body = unchunk_body(''.join(info['chunks']))
+ self.assertTrue(info['boundary'] is not None,
+ "didn't get boundary for conn %r" % (
+ connection_id,))
+
+ # email.parser.FeedParser doesn't know how to take a multipart
+ # message and boundary together and parse it; it only knows how
+ # to take a string, parse the headers, and figure out the
+ # boundary on its own.
+ parser = email.parser.FeedParser()
+ parser.feed(
+ "Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
+ info['boundary'])
+ parser.feed(body)
+ message = parser.close()
+
+ self.assertTrue(message.is_multipart()) # sanity check
+ mime_parts = message.get_payload()
+ self.assertEqual(len(mime_parts), 3)
+ obj_part, footer_part, commit_part = mime_parts
+
+ # attach the body to frag_archives list
+ self.assertEqual(obj_part['X-Document'], 'object body')
+ frag_archives.append(obj_part.get_payload())
+
+ # validate some footer metadata
+ self.assertEqual(footer_part['X-Document'], 'object metadata')
+ footer_metadata = json.loads(footer_part.get_payload())
+ self.assertTrue(footer_metadata)
+ expected = {
+ 'X-Object-Sysmeta-EC-Content-Length': str(size),
+ 'X-Backend-Container-Update-Override-Size': str(size),
+ 'X-Object-Sysmeta-EC-Etag': etag,
+ 'X-Backend-Container-Update-Override-Etag': etag,
+ 'X-Object-Sysmeta-EC-Segment-Size': str(segment_size),
+ }
+ for header, value in expected.items():
+ self.assertEqual(footer_metadata[header], value)
+
+ # sanity on commit message
+ self.assertEqual(commit_part['X-Document'], 'put commit')
+
+ self.assertEqual(len(frag_archives), self.replicas())
+ fragment_size = self.policy.fragment_size
+ node_payloads = []
+ for fa in frag_archives:
+ payload = [fa[x:x + fragment_size]
+ for x in range(0, len(fa), fragment_size)]
+ node_payloads.append(payload)
+ fragment_payloads = zip(*node_payloads)
+
+ expected_body = ''
+ for fragment_payload in fragment_payloads:
+ self.assertEqual(len(fragment_payload), self.replicas())
+ if True:
+ fragment_payload = list(fragment_payload)
+ expected_body += self.policy.pyeclib_driver.decode(
+ fragment_payload)
+
+ self.assertEqual(len(test_body), len(expected_body))
+ self.assertEqual(test_body, expected_body)
+
+ def test_PUT_old_obj_server(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ responses = [
+            # one server will respond with 100-continue but not include the
+            # needed expect headers and the connection will be dropped
+ ((100, Exception('not used')), {}),
+ ] + [
+            # and plenty of successful responses too
+ (201, {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes',
+ }),
+ ] * self.replicas()
+ random.shuffle(responses)
+ if responses[-1][0] != 201:
+ # whoops, stupid random
+ responses = responses[1:] + [responses[0]]
+ codes, expect_headers = zip(*responses)
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_COPY_cross_policy_type_from_replicated(self):
+ self.app.per_container_info = {
+ 'c1': self.app.container_info.copy(),
+ 'c2': self.app.container_info.copy(),
+ }
+ # make c2 use replicated storage policy 1
+ self.app.per_container_info['c2']['storage_policy'] = '1'
+
+ # a put request with copy from source c2
+ req = swift.common.swob.Request.blank('/v1/a/c1/o', method='PUT',
+ body='', headers={
+ 'X-Copy-From': 'c2/o'})
+
+ # c2 get
+ codes = [200] * self.replicas(POLICIES[1])
+ codes += [404] * POLICIES[1].object_ring.max_more_nodes
+ # c1 put
+ codes += [201] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+
+ def test_COPY_cross_policy_type_to_replicated(self):
+ self.app.per_container_info = {
+ 'c1': self.app.container_info.copy(),
+ 'c2': self.app.container_info.copy(),
+ }
+ # make c1 use replicated storage policy 1
+ self.app.per_container_info['c1']['storage_policy'] = '1'
+
+ # a put request with copy from source c2
+ req = swift.common.swob.Request.blank('/v1/a/c1/o', method='PUT',
+ body='', headers={
+ 'X-Copy-From': 'c2/o'})
+
+ # c2 get
+ codes = [200] * self.replicas()
+ codes += [404] * self.obj_ring.max_more_nodes
+ headers = {
+ 'X-Object-Sysmeta-Ec-Content-Length': 0,
+ }
+ # c1 put
+ codes += [201] * self.replicas(POLICIES[1])
+ with set_http_connect(*codes, headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+
+ def test_COPY_cross_policy_type_unknown(self):
+ self.app.per_container_info = {
+ 'c1': self.app.container_info.copy(),
+ 'c2': self.app.container_info.copy(),
+ }
+ # make c1 use some made up storage policy index
+ self.app.per_container_info['c1']['storage_policy'] = '13'
+
+ # a COPY request of c2 with destination in c1
+ req = swift.common.swob.Request.blank('/v1/a/c2/o', method='COPY',
+ body='', headers={
+ 'Destination': 'c1/o'})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 503)
+
+ def _make_ec_archive_bodies(self, test_body, policy=None):
+ policy = policy or self.policy
+ segment_size = policy.ec_segment_size
+ # split up the body into buffers
+ chunks = [test_body[x:x + segment_size]
+ for x in range(0, len(test_body), segment_size)]
+ # encode the buffers into fragment payloads
+ fragment_payloads = []
+ for chunk in chunks:
+ fragments = self.policy.pyeclib_driver.encode(chunk)
+ if not fragments:
+ break
+ fragment_payloads.append(fragments)
+
+ # join up the fragment payloads per node
+ ec_archive_bodies = [''.join(fragments)
+ for fragments in zip(*fragment_payloads)]
+ return ec_archive_bodies
+
+ def test_GET_mismatched_fragment_archives(self):
+ segment_size = self.policy.ec_segment_size
+ test_data1 = ('test' * segment_size)[:-333]
+ # N.B. the object data *length* here is different
+ test_data2 = ('blah1' * segment_size)[:-333]
+
+ etag1 = md5(test_data1).hexdigest()
+ etag2 = md5(test_data2).hexdigest()
+
+ ec_archive_bodies1 = self._make_ec_archive_bodies(test_data1)
+ ec_archive_bodies2 = self._make_ec_archive_bodies(test_data2)
+
+ headers1 = {'X-Object-Sysmeta-Ec-Etag': etag1}
+        # here we're going to *lie* and say the etag matches
+ headers2 = {'X-Object-Sysmeta-Ec-Etag': etag1}
+
+ responses1 = [(200, body, headers1)
+ for body in ec_archive_bodies1]
+ responses2 = [(200, body, headers2)
+ for body in ec_archive_bodies2]
+
+ req = swob.Request.blank('/v1/a/c/o')
+
+ # sanity check responses1
+ responses = responses1[:self.policy.ec_ndata]
+ status_codes, body_iter, headers = zip(*responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(md5(resp.body).hexdigest(), etag1)
+
+ # sanity check responses2
+ responses = responses2[:self.policy.ec_ndata]
+ status_codes, body_iter, headers = zip(*responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(md5(resp.body).hexdigest(), etag2)
+
+ # now mix the responses a bit
+ mix_index = random.randint(0, self.policy.ec_ndata - 1)
+ mixed_responses = responses1[:self.policy.ec_ndata]
+ mixed_responses[mix_index] = responses2[mix_index]
+
+ status_codes, body_iter, headers = zip(*mixed_responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ try:
+ resp.body
+ except ECDriverError:
+ pass
+ else:
+ self.fail('invalid ec fragment response body did not blow up!')
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(1, len(error_lines))
+ msg = error_lines[0]
+ self.assertTrue('Error decoding fragments' in msg)
+ self.assertTrue('/a/c/o' in msg)
+ log_msg_args, log_msg_kwargs = self.logger.log_dict['error'][0]
+ self.assertEqual(log_msg_kwargs['exc_info'][0], ECDriverError)
+
+ def test_GET_read_timeout(self):
+ segment_size = self.policy.ec_segment_size
+ test_data = ('test' * segment_size)[:-333]
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = self._make_ec_archive_bodies(test_data)
+ headers = {'X-Object-Sysmeta-Ec-Etag': etag}
+ self.app.recoverable_node_timeout = 0.01
+ responses = [(200, SlowBody(body, 0.1), headers)
+ for body in ec_archive_bodies]
+
+ req = swob.Request.blank('/v1/a/c/o')
+
+ status_codes, body_iter, headers = zip(*responses + [
+ (404, '', {}) for i in range(
+ self.policy.object_ring.max_more_nodes)])
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ # do this inside the fake http context manager, it'll try to
+ # resume but won't be able to give us all the right bytes
+ self.assertNotEqual(md5(resp.body).hexdigest(), etag)
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(self.replicas(), len(error_lines))
+ nparity = self.policy.ec_nparity
+ for line in error_lines[:nparity]:
+ self.assertTrue('retrying' in line)
+ for line in error_lines[nparity:]:
+ self.assertTrue('ChunkReadTimeout (0.01s)' in line)
+
+ def test_GET_read_timeout_resume(self):
+ segment_size = self.policy.ec_segment_size
+ test_data = ('test' * segment_size)[:-333]
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = self._make_ec_archive_bodies(test_data)
+ headers = {'X-Object-Sysmeta-Ec-Etag': etag}
+ self.app.recoverable_node_timeout = 0.05
+ # first one is slow
+ responses = [(200, SlowBody(ec_archive_bodies[0], 0.1), headers)]
+ # ... the rest are fine
+ responses += [(200, body, headers)
+ for body in ec_archive_bodies[1:]]
+
+ req = swob.Request.blank('/v1/a/c/o')
+
+ status_codes, body_iter, headers = zip(
+ *responses[:self.policy.ec_ndata + 1])
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertTrue(md5(resp.body).hexdigest(), etag)
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(1, len(error_lines))
+ self.assertTrue('retrying' in error_lines[0])
if __name__ == '__main__':
diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index 1a59016..bdce41f 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,55 +15,67 @@
# limitations under the License.
from __future__ import with_statement
-import cPickle as pickle
import logging
+import math
import os
+import pickle
import sys
import unittest
-import urlparse
-from nose import SkipTest
-from contextlib import contextmanager, nested, closing
+from contextlib import closing, contextmanager, nested
from gzip import GzipFile
from shutil import rmtree
+from StringIO import StringIO
import gc
import time
+from textwrap import dedent
from urllib import quote
from hashlib import md5
-from tempfile import mkdtemp
+from pyeclib.ec_iface import ECDriverError
+from tempfile import mkdtemp, NamedTemporaryFile
import weakref
+import operator
+import functools
+from swift.obj import diskfile
import re
+import random
+import urlparse
+from nose import SkipTest
import mock
-from eventlet import sleep, spawn, wsgi, listen
-import simplejson
+from eventlet import sleep, spawn, wsgi, listen, Timeout
+from swift.common.utils import hash_path, json, storage_directory, public
import gluster.swift.common.Glusterfs as gfs
gfs.RUN_DIR = mkdtemp()
-from test.unit import connect_tcp, readuntil2crlfs, FakeLogger, \
- fake_http_connect, FakeRing, FakeMemcache, debug_logger
+from test.unit import (
+ connect_tcp, readuntil2crlfs, FakeLogger, fake_http_connect, FakeRing,
+ FakeMemcache, debug_logger, patch_policies, write_fake_ring,
+ mocked_http_conn)
+from swift.proxy.controllers.obj import ReplicatedObjectController
from gluster.swift.proxy import server as proxy_server
from gluster.swift.account import server as account_server
from gluster.swift.container import server as container_server
from gluster.swift.obj import server as object_server
-from swift.common import ring
from swift.common.middleware import proxy_logging
from swift.common.middleware.acl import parse_acl, format_acl
-from swift.common.exceptions import ChunkReadTimeout
-from swift.common.constraints import MAX_META_NAME_LENGTH, \
- MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \
- MAX_FILE_SIZE, MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH, \
- ACCOUNT_LISTING_LIMIT, CONTAINER_LISTING_LIMIT, MAX_OBJECT_NAME_LENGTH
-from swift.common import utils
+from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \
+ APIVersionError
+from swift.common import utils, constraints
+from swift.common.ring import RingData
from swift.common.utils import mkdirs, normalize_timestamp, NullLogger
-from swift.common.wsgi import monkey_patch_mimetools
+from swift.common.wsgi import monkey_patch_mimetools, loadapp
from swift.proxy.controllers import base as proxy_base
from swift.proxy.controllers.base import get_container_memcache_key, \
get_account_memcache_key, cors_validation
import swift.proxy.controllers
-from swift.common.request_helpers import get_sys_meta_prefix
+import swift.proxy.controllers.obj
from swift.common.swob import Request, Response, HTTPUnauthorized, \
- HTTPException
+ HTTPException, HTTPForbidden, HeaderKeyDict
+from swift.common import storage_policy
+from swift.common.storage_policy import StoragePolicy, ECStoragePolicy, \
+ StoragePolicyCollection, POLICIES
+from swift.common.request_helpers import get_sys_meta_prefix
# mocks
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
@@ -70,13 +83,15 @@ logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
STATIC_TIME = time.time()
_test_coros = _test_servers = _test_sockets = _orig_container_listing_limit = \
- _testdir = _orig_SysLogHandler = None
+ _testdir = _orig_SysLogHandler = _orig_POLICIES = _test_POLICIES = None
def do_setup(the_object_server):
utils.HASH_PATH_SUFFIX = 'endcap'
global _testdir, _test_servers, _test_sockets, \
- _orig_container_listing_limit, _test_coros, _orig_SysLogHandler
+ _orig_container_listing_limit, _test_coros, _orig_SysLogHandler, \
+ _orig_POLICIES, _test_POLICIES
+ _orig_POLICIES = storage_policy._POLICIES
_orig_SysLogHandler = utils.SysLogHandler
utils.SysLogHandler = mock.MagicMock()
monkey_patch_mimetools()
@@ -86,12 +101,13 @@ def do_setup(the_object_server):
_testdir = os.path.join(gfs.RUN_DIR, 'swift')
mkdirs(_testdir)
rmtree(_testdir)
- mkdirs(os.path.join(_testdir, 'sda1'))
- mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
- mkdirs(os.path.join(_testdir, 'sdb1'))
- mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
+ for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1',
+ 'sdf1', 'sdg1', 'sdh1', 'sdi1'):
+ mkdirs(os.path.join(_testdir, drive, 'tmp'))
mkdirs(os.path.join(_testdir, 'a'))
+ mkdirs(os.path.join(_testdir, 'a1'))
mkdirs(os.path.join(_testdir, 'a', 'tmp'))
+ mkdirs(os.path.join(_testdir, 'a1', 'tmp'))
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers':
'content-encoding, x-object-manifest, content-disposition, foo',
@@ -103,43 +119,57 @@ def do_setup(the_object_server):
con2lis = listen(('localhost', 0))
obj1lis = listen(('localhost', 0))
obj2lis = listen(('localhost', 0))
+ obj3lis = listen(('localhost', 0))
+ objsocks = [obj1lis, obj2lis, obj3lis]
_test_sockets = \
- (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis)
+ (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
with closing(GzipFile(account_ring_path, 'wb')) as f:
- pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
+ pickle.dump(RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': acc1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': acc2lis.getsockname()[1]},
# Gluster volume mapping to device
- {'id': 1, 'zone': 1, 'device': 'a', 'ip': '127.0.0.1',
+ {'id': 0, 'zone': 0, 'device': 'a', 'ip': '127.0.0.1',
+ 'port': acc1lis.getsockname()[1]},
+ {'id': 1, 'zone': 1, 'device': 'a1', 'ip': '127.0.0.1',
'port': acc2lis.getsockname()[1]}], 30),
f)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
with closing(GzipFile(container_ring_path, 'wb')) as f:
- pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
+ pickle.dump(RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': con1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': con2lis.getsockname()[1]},
# Gluster volume mapping to device
- {'id': 1, 'zone': 1, 'device': 'a', 'ip': '127.0.0.1',
+ {'id': 0, 'zone': 0, 'device': 'a', 'ip': '127.0.0.1',
+ 'port': con1lis.getsockname()[1]},
+ {'id': 1, 'zone': 1, 'device': 'a1', 'ip': '127.0.0.1',
'port': con2lis.getsockname()[1]}], 30),
f)
object_ring_path = os.path.join(_testdir, 'object.ring.gz')
with closing(GzipFile(object_ring_path, 'wb')) as f:
- pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
+ pickle.dump(RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': obj1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]},
# Gluster volume mapping to device
- {'id': 1, 'zone': 1, 'device': 'a', 'ip': '127.0.0.1',
+ {'id': 0, 'zone': 0, 'device': 'a', 'ip': '127.0.0.1',
+ 'port': obj1lis.getsockname()[1]},
+ {'id': 1, 'zone': 1, 'device': 'a1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]}], 30),
f)
+
prosrv = proxy_server.Application(conf, FakeMemcacheReturnsNone(),
logger=debug_logger('proxy'))
+ for policy in POLICIES:
+ # make sure all the rings are loaded
+ prosrv.get_object_ring(policy.idx)
+ # don't lose this one!
+ _test_POLICIES = storage_policy._POLICIES
acc1srv = account_server.AccountController(
conf, logger=debug_logger('acct1'))
acc2srv = account_server.AccountController(
@@ -152,8 +182,10 @@ def do_setup(the_object_server):
conf, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj2'))
+ obj3srv = the_object_server.ObjectController(
+ conf, logger=debug_logger('obj3'))
_test_servers = \
- (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv)
+ (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv)
nl = NullLogger()
logging_prosv = proxy_logging.ProxyLoggingMiddleware(prosrv, conf,
logger=prosrv.logger)
@@ -164,9 +196,10 @@ def do_setup(the_object_server):
con2spa = spawn(wsgi.server, con2lis, con2srv, nl)
obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl)
obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl)
+ obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl)
_test_coros = \
- (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa)
- # Gluster: ensure account exists
+ (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa)
+ # Create account
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a')
for node in nodes:
@@ -177,12 +210,24 @@ def do_setup(the_object_server):
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
-
+ assert(resp.status == 202)
+ # Gluster: ensure account exists
+ ts = normalize_timestamp(time.time())
+ partition, nodes = prosrv.account_ring.get_nodes('a1')
+ for node in nodes:
+ conn = swift.proxy.controllers.obj.http_connect(node['ip'],
+ node['port'],
+ node['device'],
+ partition, 'PUT',
+ '/a1',
+ {'X-Timestamp': ts,
+ 'x-trans-id': 'test'})
+ resp = conn.getresponse()
# For GlusterFS the volume should have already been created since
# accounts map to volumes. Expect a 202 instead of a 201 as for
# OpenStack Swift's proxy unit test the account is explicitly created.
assert(resp.status == 202)
- # Create container
+ # Create containers, 1 per test policy
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
@@ -193,6 +238,62 @@ def do_setup(the_object_server):
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
+ # Create container in other account
+ # used for account-to-account tests
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n'
+ 'Connection: close\r\nX-Auth-Token: t\r\n'
+ 'Content-Length: 0\r\n\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
+ exp, headers[:len(exp)])
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write(
+ 'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n'
+ #'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n'
+ # Gluster-Swift: No storage policies
+ 'Connection: close\r\nX-Auth-Token: t\r\n'
+ 'Content-Length: 0\r\n\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ assert headers[:len(exp)] == exp, \
+ "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write(
+ 'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n'
+ #'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n'
+ # Gluster-Swift: No storage policies
+ 'Connection: close\r\nX-Auth-Token: t\r\n'
+ 'Content-Length: 0\r\n\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ assert headers[:len(exp)] == exp, \
+ "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
+
+
+def unpatch_policies(f):
+ """
+ This will unset a TestCase level patch_policies to use the module level
+ policies setup for the _test_servers instead.
+
+ N.B. You should NEVER modify the _test_server policies or rings during a
+ test because they persist for the life of the entire module!
+ """
+ raise SkipTest("Storage Policies are not supported")
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ with patch_policies(_test_POLICIES):
+ return f(*args, **kwargs)
+ return wrapper
def setup():
@@ -204,6 +305,7 @@ def teardown():
server.kill()
rmtree(os.path.dirname(_testdir))
utils.SysLogHandler = _orig_SysLogHandler
+ storage_policy._POLICIES = _orig_POLICIES
def sortHeaderNames(headerNames):
@@ -217,6 +319,37 @@ def sortHeaderNames(headerNames):
return ', '.join(headers)
+def parse_headers_string(headers_str):
+ headers_dict = HeaderKeyDict()
+ for line in headers_str.split('\r\n'):
+ if ': ' in line:
+ header, value = line.split(': ', 1)
+ headers_dict[header] = value
+ return headers_dict
+
+
+def node_error_count(proxy_app, ring_node):
+ # Reach into the proxy's internals to get the error count for a
+ # particular node
+ node_key = proxy_app._error_limit_node_key(ring_node)
+ return proxy_app._error_limiting.get(node_key, {}).get('errors', 0)
+
+
+def node_last_error(proxy_app, ring_node):
+ # Reach into the proxy's internals to get the last error for a
+ # particular node
+ node_key = proxy_app._error_limit_node_key(ring_node)
+ return proxy_app._error_limiting.get(node_key, {}).get('last_error')
+
+
+def set_node_errors(proxy_app, ring_node, value, last_error):
+ # Set the node's error count to value
+ node_key = proxy_app._error_limit_node_key(ring_node)
+ stats = proxy_app._error_limiting.setdefault(node_key, {})
+ stats['errors'] = value
+ stats['last_error'] = last_error
+
+
class FakeMemcacheReturnsNone(FakeMemcache):
def get(self, key):
@@ -231,6 +364,9 @@ def save_globals():
None)
orig_account_info = getattr(swift.proxy.controllers.Controller,
'account_info', None)
+ orig_container_info = getattr(swift.proxy.controllers.Controller,
+ 'container_info', None)
+
try:
yield True
finally:
@@ -239,6 +375,7 @@ def save_globals():
swift.proxy.controllers.obj.http_connect = orig_http_connect
swift.proxy.controllers.account.http_connect = orig_http_connect
swift.proxy.controllers.container.http_connect = orig_http_connect
+ swift.proxy.controllers.Controller.container_info = orig_container_info
def set_http_connect(*args, **kwargs):
@@ -250,6 +387,36 @@ def set_http_connect(*args, **kwargs):
return new_connect
+def _make_callback_func(calls):
+ def callback(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None, ssl=False):
+ context = {}
+ context['method'] = method
+ context['path'] = path
+ context['headers'] = headers or {}
+ calls.append(context)
+ return callback
+
+
+def _limit_max_file_size(f):
+ """
+ This will limit constraints.MAX_FILE_SIZE for the duration of the
+ wrapped function, based on whether MAX_FILE_SIZE exceeds the
+ sys.maxsize limit on the system running the tests.
+
+ This allows successful testing on 32 bit systems.
+ """
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ test_max_file_size = constraints.MAX_FILE_SIZE
+ if constraints.MAX_FILE_SIZE >= sys.maxsize:
+ test_max_file_size = (2 ** 30 + 2)
+ with mock.patch.object(constraints, 'MAX_FILE_SIZE',
+ test_max_file_size):
+ return f(*args, **kwargs)
+ return wrapper
+
+
# tests
class TestController(unittest.TestCase):
@@ -257,11 +424,9 @@ class TestController(unittest.TestCase):
self.account_ring = FakeRing()
self.container_ring = FakeRing()
self.memcache = FakeMemcache()
-
app = proxy_server.Application(None, self.memcache,
account_ring=self.account_ring,
- container_ring=self.container_ring,
- object_ring=FakeRing())
+ container_ring=self.container_ring)
self.controller = swift.proxy.controllers.Controller(app)
class FakeReq(object):
@@ -515,17 +680,43 @@ class TestController(unittest.TestCase):
test(503, 503, 503)
+@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestProxyServer(unittest.TestCase):
+ def test_get_object_ring(self):
+ baseapp = proxy_server.Application({},
+ FakeMemcache(),
+ container_ring=FakeRing(),
+ account_ring=FakeRing())
+ with patch_policies([
+ StoragePolicy(0, 'a', False, object_ring=123),
+ StoragePolicy(1, 'b', True, object_ring=456),
+ StoragePolicy(2, 'd', False, object_ring=789)
+ ]):
+ # None means legacy so always use policy 0
+ ring = baseapp.get_object_ring(None)
+ self.assertEqual(ring, 123)
+ ring = baseapp.get_object_ring('')
+ self.assertEqual(ring, 123)
+ ring = baseapp.get_object_ring('0')
+ self.assertEqual(ring, 123)
+ ring = baseapp.get_object_ring('1')
+ self.assertEqual(ring, 456)
+ ring = baseapp.get_object_ring('2')
+ self.assertEqual(ring, 789)
+ # illegal values
+ self.assertRaises(ValueError, baseapp.get_object_ring, '99')
+ self.assertRaises(ValueError, baseapp.get_object_ring, 'asdf')
+
def test_unhandled_exception(self):
class MyApp(proxy_server.Application):
def get_controller(self, path):
- raise Exception('this shouldnt be caught')
+ raise Exception('this shouldn\'t be caught')
app = MyApp(None, FakeMemcache(), account_ring=FakeRing(),
- container_ring=FakeRing(), object_ring=FakeRing())
+ container_ring=FakeRing())
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
app.update_request(req)
resp = app.handle_request(req)
@@ -535,7 +726,6 @@ class TestProxyServer(unittest.TestCase):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
- object_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '__init__'}))
@@ -545,8 +735,7 @@ class TestProxyServer(unittest.TestCase):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
- account_ring=FakeRing(),
- object_ring=FakeRing())
+ account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '!invalid'}))
self.assertEquals(resp.status, '405 Method Not Allowed')
@@ -560,8 +749,7 @@ class TestProxyServer(unittest.TestCase):
set_http_connect(200)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
@@ -576,8 +764,7 @@ class TestProxyServer(unittest.TestCase):
return HTTPUnauthorized(request=req)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
@@ -589,8 +776,7 @@ class TestProxyServer(unittest.TestCase):
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
- FakeRing(), FakeRing(),
- FakeRing())
+ FakeRing(), FakeRing())
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-1'}))
self.assertEquals(resp.status, '400 Bad Request')
@@ -602,15 +788,52 @@ class TestProxyServer(unittest.TestCase):
finally:
rmtree(swift_dir, ignore_errors=True)
+ def test_adds_transaction_id(self):
+ swift_dir = mkdtemp()
+ try:
+ logger = FakeLogger()
+ baseapp = proxy_server.Application({'swift_dir': swift_dir},
+ FakeMemcache(), logger,
+                                           container_ring=FakeRing(),
+ account_ring=FakeRing())
+ baseapp.handle_request(
+ Request.blank('/info',
+ environ={'HTTP_X_TRANS_ID_EXTRA': 'sardine',
+ 'REQUEST_METHOD': 'GET'}))
+ # This is kind of a hokey way to get the transaction ID; it'd be
+ # better to examine response headers, but the catch_errors
+ # middleware is what sets the X-Trans-Id header, and we don't have
+ # that available here.
+ self.assertTrue(logger.txn_id.endswith('-sardine'))
+ finally:
+ rmtree(swift_dir, ignore_errors=True)
+
+ def test_adds_transaction_id_length_limit(self):
+ swift_dir = mkdtemp()
+ try:
+ logger = FakeLogger()
+ baseapp = proxy_server.Application({'swift_dir': swift_dir},
+ FakeMemcache(), logger,
+                                           container_ring=FakeRing(),
+ account_ring=FakeRing())
+ baseapp.handle_request(
+ Request.blank('/info',
+ environ={'HTTP_X_TRANS_ID_EXTRA': 'a' * 1000,
+ 'REQUEST_METHOD': 'GET'}))
+ self.assertTrue(logger.txn_id.endswith(
+ '-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
+ finally:
+ rmtree(swift_dir, ignore_errors=True)
+
def test_denied_host_header(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir,
'deny_host_headers':
'invalid_host.com'},
- FakeMemcache(), FakeLogger(),
- FakeRing(), FakeRing(),
- FakeRing())
+ FakeMemcache(),
+                                           container_ring=FakeRing(),
+ account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a/c/o',
environ={'HTTP_HOST': 'invalid_host.com'}))
@@ -622,7 +845,6 @@ class TestProxyServer(unittest.TestCase):
baseapp = proxy_server.Application({'sorting_method': 'timing'},
FakeMemcache(),
container_ring=FakeRing(),
- object_ring=FakeRing(),
account_ring=FakeRing())
self.assertEquals(baseapp.node_timings, {})
@@ -651,7 +873,6 @@ class TestProxyServer(unittest.TestCase):
'read_affinity': 'r1=1'},
FakeMemcache(),
container_ring=FakeRing(),
- object_ring=FakeRing(),
account_ring=FakeRing())
nodes = [{'region': 2, 'zone': 1, 'ip': '127.0.0.1'},
@@ -665,22 +886,22 @@ class TestProxyServer(unittest.TestCase):
def test_info_defaults(self):
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ container_ring=FakeRing())
self.assertTrue(app.expose_info)
self.assertTrue(isinstance(app.disallowed_sections, list))
- self.assertEqual(0, len(app.disallowed_sections))
+ self.assertEqual(1, len(app.disallowed_sections))
+ self.assertEqual(['swift.valid_api_versions'],
+ app.disallowed_sections)
self.assertTrue(app.admin_key is None)
def test_get_info_controller(self):
- path = '/info'
+ req = Request.blank('/info')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ container_ring=FakeRing())
- controller, path_parts = app.get_controller(path)
+ controller, path_parts = app.get_controller(req)
self.assertTrue('version' in path_parts)
self.assertTrue(path_parts['version'] is None)
@@ -690,20 +911,253 @@ class TestProxyServer(unittest.TestCase):
self.assertEqual(controller.__name__, 'InfoController')
+ def test_error_limit_methods(self):
+ logger = debug_logger('test')
+ app = proxy_server.Application({}, FakeMemcache(),
+ account_ring=FakeRing(),
+ container_ring=FakeRing(),
+ logger=logger)
+ node = app.container_ring.get_part_nodes(0)[0]
+ # error occurred
+ app.error_occurred(node, 'test msg')
+ self.assertTrue('test msg' in
+ logger.get_lines_for_level('error')[-1])
+ self.assertEqual(1, node_error_count(app, node))
+
+ # exception occurred
+ try:
+ raise Exception('kaboom1!')
+ except Exception as e1:
+ app.exception_occurred(node, 'test1', 'test1 msg')
+ line = logger.get_lines_for_level('error')[-1]
+ self.assertTrue('test1 server' in line)
+ self.assertTrue('test1 msg' in line)
+ log_args, log_kwargs = logger.log_dict['error'][-1]
+ self.assertTrue(log_kwargs['exc_info'])
+ self.assertEqual(log_kwargs['exc_info'][1], e1)
+ self.assertEqual(2, node_error_count(app, node))
+
+ # warning exception occurred
+ try:
+ raise Exception('kaboom2!')
+ except Exception as e2:
+ app.exception_occurred(node, 'test2', 'test2 msg',
+ level=logging.WARNING)
+ line = logger.get_lines_for_level('warning')[-1]
+ self.assertTrue('test2 server' in line)
+ self.assertTrue('test2 msg' in line)
+ log_args, log_kwargs = logger.log_dict['warning'][-1]
+ self.assertTrue(log_kwargs['exc_info'])
+ self.assertEqual(log_kwargs['exc_info'][1], e2)
+ self.assertEqual(3, node_error_count(app, node))
+
+ # custom exception occurred
+ try:
+ raise Exception('kaboom3!')
+ except Exception as e3:
+ e3_info = sys.exc_info()
+ try:
+ raise Exception('kaboom4!')
+ except Exception:
+ pass
+ app.exception_occurred(node, 'test3', 'test3 msg',
+ level=logging.WARNING, exc_info=e3_info)
+ line = logger.get_lines_for_level('warning')[-1]
+ self.assertTrue('test3 server' in line)
+ self.assertTrue('test3 msg' in line)
+ log_args, log_kwargs = logger.log_dict['warning'][-1]
+ self.assertTrue(log_kwargs['exc_info'])
+ self.assertEqual(log_kwargs['exc_info'][1], e3)
+ self.assertEqual(4, node_error_count(app, node))
+
+ def test_valid_api_version(self):
+ app = proxy_server.Application({}, FakeMemcache(),
+ account_ring=FakeRing(),
+ container_ring=FakeRing())
+
+ # The version string is only checked for account, container and object
+ # requests; the raised APIVersionError returns a 404 to the client
+ for path in [
+ '/v2/a',
+ '/v2/a/c',
+ '/v2/a/c/o']:
+ req = Request.blank(path)
+ self.assertRaises(APIVersionError, app.get_controller, req)
+
+ # Default valid API versions are ok
+ for path in [
+ '/v1/a',
+ '/v1/a/c',
+ '/v1/a/c/o',
+ '/v1.0/a',
+ '/v1.0/a/c',
+ '/v1.0/a/c/o']:
+ req = Request.blank(path)
+ controller, path_parts = app.get_controller(req)
+ self.assertTrue(controller is not None)
+
+ # Ensure settings valid API version constraint works
+ for version in ["42", 42]:
+ try:
+ with NamedTemporaryFile() as f:
+ f.write('[swift-constraints]\n')
+ f.write('valid_api_versions = %s\n' % version)
+ f.flush()
+ with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
+ constraints.reload_constraints()
+
+ req = Request.blank('/%s/a' % version)
+ controller, _ = app.get_controller(req)
+ self.assertTrue(controller is not None)
+
+ # In this case v1 is invalid
+ req = Request.blank('/v1/a')
+ self.assertRaises(APIVersionError, app.get_controller, req)
+ finally:
+ constraints.reload_constraints()
+
+ # Check that the valid_api_versions is not exposed by default
+ req = Request.blank('/info')
+ controller, path_parts = app.get_controller(req)
+ self.assertTrue('swift.valid_api_versions' in
+ path_parts.get('disallowed_sections'))
+
+@patch_policies([
+ StoragePolicy(0, 'zero', is_default=True),
+ StoragePolicy(1, 'one'),
+])
+class TestProxyServerLoading(unittest.TestCase):
+
+ def setUp(self):
+ self._orig_hash_suffix = utils.HASH_PATH_SUFFIX
+ utils.HASH_PATH_SUFFIX = 'endcap'
+ self.tempdir = mkdtemp()
+
+ def tearDown(self):
+ rmtree(self.tempdir)
+ utils.HASH_PATH_SUFFIX = self._orig_hash_suffix
+ for policy in POLICIES:
+ policy.object_ring = None
+
+ def test_load_policy_rings(self):
+ for policy in POLICIES:
+ self.assertFalse(policy.object_ring)
+ conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
+ conf_body = """
+ [DEFAULT]
+ swift_dir = %s
+
+ [pipeline:main]
+ pipeline = catch_errors cache proxy-server
+
+ [app:proxy-server]
+ use = egg:swift#proxy
+
+ [filter:cache]
+ use = egg:swift#memcache
+
+ [filter:catch_errors]
+ use = egg:swift#catch_errors
+ """ % self.tempdir
+ with open(conf_path, 'w') as f:
+ f.write(dedent(conf_body))
+ account_ring_path = os.path.join(self.tempdir, 'account.ring.gz')
+ write_fake_ring(account_ring_path)
+ container_ring_path = os.path.join(self.tempdir, 'container.ring.gz')
+ write_fake_ring(container_ring_path)
+ for policy in POLICIES:
+ object_ring_path = os.path.join(self.tempdir,
+ policy.ring_name + '.ring.gz')
+ write_fake_ring(object_ring_path)
+ app = loadapp(conf_path)
+ # find the end of the pipeline
+ while hasattr(app, 'app'):
+ app = app.app
+
+ # validate loaded rings
+ self.assertEqual(app.account_ring.serialized_path,
+ account_ring_path)
+ self.assertEqual(app.container_ring.serialized_path,
+ container_ring_path)
+ for policy in POLICIES:
+ self.assertEqual(policy.object_ring,
+ app.get_object_ring(int(policy)))
+
+ def test_missing_rings(self):
+ conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
+ conf_body = """
+ [DEFAULT]
+ swift_dir = %s
+
+ [pipeline:main]
+ pipeline = catch_errors cache proxy-server
+
+ [app:proxy-server]
+ use = egg:swift#proxy
+
+ [filter:cache]
+ use = egg:swift#memcache
+
+ [filter:catch_errors]
+ use = egg:swift#catch_errors
+ """ % self.tempdir
+ with open(conf_path, 'w') as f:
+ f.write(dedent(conf_body))
+ ring_paths = [
+ os.path.join(self.tempdir, 'account.ring.gz'),
+ os.path.join(self.tempdir, 'container.ring.gz'),
+ ]
+ for policy in POLICIES:
+ self.assertFalse(policy.object_ring)
+ object_ring_path = os.path.join(self.tempdir,
+ policy.ring_name + '.ring.gz')
+ ring_paths.append(object_ring_path)
+ for policy in POLICIES:
+ self.assertFalse(policy.object_ring)
+ for ring_path in ring_paths:
+ self.assertFalse(os.path.exists(ring_path))
+ self.assertRaises(IOError, loadapp, conf_path)
+ write_fake_ring(ring_path)
+ # all rings exist, app should load
+ loadapp(conf_path)
+ for policy in POLICIES:
+ self.assert_(policy.object_ring)
+
+
+@patch_policies([StoragePolicy(0, 'zero', True,
+ object_ring=FakeRing(base_port=3000))])
class TestObjectController(unittest.TestCase):
def setUp(self):
- self.app = proxy_server.Application(None, FakeMemcache(),
- logger=debug_logger('proxy-ut'),
- account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ self.app = proxy_server.Application(
+ None, FakeMemcache(),
+ logger=debug_logger('proxy-ut'),
+ account_ring=FakeRing(),
+ container_ring=FakeRing())
def tearDown(self):
self.app.account_ring.set_replicas(3)
self.app.container_ring.set_replicas(3)
- self.app.object_ring.set_replicas(3)
+ for policy in POLICIES:
+ policy.object_ring = FakeRing(base_port=3000)
+
+ def put_container(self, policy_name, container_name):
+ # Note: only works if called with unpatched policies
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/%s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: 0\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Storage-Policy: %s\r\n'
+ '\r\n' % (container_name, policy_name))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 2'
+ self.assertEqual(headers[:len(exp)], exp)
def assert_status_map(self, method, statuses, expected, raise_exc=False):
with save_globals():
@@ -717,7 +1171,10 @@ class TestObjectController(unittest.TestCase):
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
- res = method(req)
+ try:
+ res = method(req)
+ except HTTPException as res:
+ pass
self.assertEquals(res.status_int, expected)
# repeat test
@@ -727,9 +1184,181 @@ class TestObjectController(unittest.TestCase):
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
- res = method(req)
+ try:
+ res = method(req)
+ except HTTPException as res:
+ pass
self.assertEquals(res.status_int, expected)
+ @unpatch_policies
+ def test_policy_IO(self):
+ def check_file(policy, cont, devs, check_val):
+ partition, nodes = policy.object_ring.get_nodes('a', cont, 'o')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+ for dev in devs:
+ file = df_mgr.get_diskfile(dev, partition, 'a',
+ cont, 'o',
+ policy=policy)
+ if check_val is True:
+ file.open()
+
+ prolis = _test_sockets[0]
+ prosrv = _test_servers[0]
+
+ # check policy 0: put file on c, read it back, check loc on disk
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ obj = 'test_object0'
+ path = '/v1/a/c/o'
+ fd.write('PUT %s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Length: %s\r\n'
+ 'Content-Type: text/plain\r\n'
+ '\r\n%s' % (path, str(len(obj)), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+ req = Request.blank(path,
+ environ={'REQUEST_METHOD': 'GET'},
+ headers={'Content-Type':
+ 'text/plain'})
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.body, obj)
+
+ check_file(POLICIES[0], 'c', ['sda1', 'sdb1'], True)
+ check_file(POLICIES[0], 'c', ['sdc1', 'sdd1', 'sde1', 'sdf1'], False)
+
+ # check policy 1: put file on c1, read it back, check loc on disk
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ path = '/v1/a/c1/o'
+ obj = 'test_object1'
+ fd.write('PUT %s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Length: %s\r\n'
+ 'Content-Type: text/plain\r\n'
+ '\r\n%s' % (path, str(len(obj)), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ self.assertEqual(headers[:len(exp)], exp)
+ req = Request.blank(path,
+ environ={'REQUEST_METHOD': 'GET'},
+ headers={'Content-Type':
+ 'text/plain'})
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.body, obj)
+
+ check_file(POLICIES[1], 'c1', ['sdc1', 'sdd1'], True)
+ check_file(POLICIES[1], 'c1', ['sda1', 'sdb1', 'sde1', 'sdf1'], False)
+
+ # check policy 2: put file on c2, read it back, check loc on disk
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ path = '/v1/a/c2/o'
+ obj = 'test_object2'
+ fd.write('PUT %s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Length: %s\r\n'
+ 'Content-Type: text/plain\r\n'
+ '\r\n%s' % (path, str(len(obj)), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ self.assertEqual(headers[:len(exp)], exp)
+ req = Request.blank(path,
+ environ={'REQUEST_METHOD': 'GET'},
+ headers={'Content-Type':
+ 'text/plain'})
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.body, obj)
+
+ check_file(POLICIES[2], 'c2', ['sde1', 'sdf1'], True)
+ check_file(POLICIES[2], 'c2', ['sda1', 'sdb1', 'sdc1', 'sdd1'], False)
+
+ @unpatch_policies
+ def test_policy_IO_override(self):
+ if hasattr(_test_servers[-1], '_filesystem'):
+ # ironically, the _filesystem attribute on the object server means
+ # the in-memory diskfile is in use, so this test does not apply
+ return
+
+ prosrv = _test_servers[0]
+
+ # validate container policy is 1
+ req = Request.blank('/v1/a/c1', method='HEAD')
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 204) # sanity check
+ self.assertEqual(POLICIES[1].name, res.headers['x-storage-policy'])
+
+ # check overrides: put it in policy 2 (not where the container says)
+ req = Request.blank(
+ '/v1/a/c1/wrong-o',
+ environ={'REQUEST_METHOD': 'PUT',
+ 'wsgi.input': StringIO("hello")},
+ headers={'Content-Type': 'text/plain',
+ 'Content-Length': '5',
+ 'X-Backend-Storage-Policy-Index': '2'})
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 201) # sanity check
+
+ # go to disk to make sure it's there
+ partition, nodes = prosrv.get_object_ring(2).get_nodes(
+ 'a', 'c1', 'wrong-o')
+ node = nodes[0]
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+ df = df_mgr.get_diskfile(node['device'], partition, 'a',
+ 'c1', 'wrong-o', policy=POLICIES[2])
+ with df.open():
+ contents = ''.join(df.reader())
+ self.assertEqual(contents, "hello")
+
+ # can't get it from the normal place
+ req = Request.blank('/v1/a/c1/wrong-o',
+ environ={'REQUEST_METHOD': 'GET'},
+ headers={'Content-Type': 'text/plain'})
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 404) # sanity check
+
+ # but we can get it from policy 2
+ req = Request.blank('/v1/a/c1/wrong-o',
+ environ={'REQUEST_METHOD': 'GET'},
+ headers={'Content-Type': 'text/plain',
+ 'X-Backend-Storage-Policy-Index': '2'})
+
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.body, 'hello')
+
+ # and we can delete it the same way
+ req = Request.blank('/v1/a/c1/wrong-o',
+ environ={'REQUEST_METHOD': 'DELETE'},
+ headers={'Content-Type': 'text/plain',
+ 'X-Backend-Storage-Policy-Index': '2'})
+
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 204)
+
+ df = df_mgr.get_diskfile(node['device'], partition, 'a',
+ 'c1', 'wrong-o', policy=POLICIES[2])
+ try:
+ df.open()
+ except DiskFileNotExist as e:
+ self.assert_(float(e.timestamp) > 0)
+ else:
+ self.fail('did not raise DiskFileNotExist')
+
+ @unpatch_policies
def test_GET_newest_large_file(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
@@ -757,6 +1386,619 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
+ @unpatch_policies
+ def test_PUT_ec(self):
+ policy = POLICIES[3]
+ self.put_container("ec", "ec-con")
+
+ obj = 'abCD' * 10 # small, so we don't get multiple EC stripes
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/o1 HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: "%s"\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ ecd = policy.pyeclib_driver
+ expected_pieces = set(ecd.encode(obj))
+
+ # go to disk to make sure it's there and all erasure-coded
+ partition, nodes = policy.object_ring.get_nodes('a', 'ec-con', 'o1')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ got_pieces = set()
+ got_indices = set()
+ got_durable = []
+ for node_index, node in enumerate(nodes):
+ df = df_mgr.get_diskfile(node['device'], partition,
+ 'a', 'ec-con', 'o1',
+ policy=policy)
+ with df.open():
+ meta = df.get_metadata()
+ contents = ''.join(df.reader())
+ got_pieces.add(contents)
+
+ # check presence for a .durable file for the timestamp
+ durable_file = os.path.join(
+ _testdir, node['device'], storage_directory(
+ diskfile.get_data_dir(policy),
+ partition, hash_path('a', 'ec-con', 'o1')),
+ utils.Timestamp(df.timestamp).internal + '.durable')
+
+ if os.path.isfile(durable_file):
+ got_durable.append(True)
+
+ lmeta = dict((k.lower(), v) for k, v in meta.items())
+ got_indices.add(
+ lmeta['x-object-sysmeta-ec-frag-index'])
+
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-etag'],
+ md5(obj).hexdigest())
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-content-length'],
+ str(len(obj)))
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-segment-size'],
+ '4096')
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-scheme'],
+ 'jerasure_rs_vand 2+1')
+ self.assertEqual(
+ lmeta['etag'],
+ md5(contents).hexdigest())
+
+ self.assertEqual(expected_pieces, got_pieces)
+ self.assertEqual(set(('0', '1', '2')), got_indices)
+
+ # verify at least 2 puts made it all the way to the end of 2nd
+ # phase, ie at least 2 .durable statuses were written
+ num_durable_puts = sum(d is True for d in got_durable)
+ self.assertTrue(num_durable_puts >= 2)
+
+ @unpatch_policies
+ def test_PUT_ec_multiple_segments(self):
+ ec_policy = POLICIES[3]
+ self.put_container("ec", "ec-con")
+
+ pyeclib_header_size = len(ec_policy.pyeclib_driver.encode("")[0])
+ segment_size = ec_policy.ec_segment_size
+
+ # Big enough to have multiple segments. Also a multiple of the
+ # segment size to get coverage of that path too.
+ obj = 'ABC' * segment_size
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/o2 HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ # it's a 2+1 erasure code, so each fragment archive should be half
+ # the length of the object, plus three inline pyeclib metadata
+ # things (one per segment)
+ expected_length = (len(obj) / 2 + pyeclib_header_size * 3)
+
+ partition, nodes = ec_policy.object_ring.get_nodes(
+ 'a', 'ec-con', 'o2')
+
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ got_durable = []
+ fragment_archives = []
+ for node in nodes:
+ df = df_mgr.get_diskfile(
+ node['device'], partition, 'a',
+ 'ec-con', 'o2', policy=ec_policy)
+ with df.open():
+ contents = ''.join(df.reader())
+ fragment_archives.append(contents)
+ self.assertEqual(len(contents), expected_length)
+
+ # check presence for a .durable file for the timestamp
+ durable_file = os.path.join(
+ _testdir, node['device'], storage_directory(
+ diskfile.get_data_dir(ec_policy),
+ partition, hash_path('a', 'ec-con', 'o2')),
+ utils.Timestamp(df.timestamp).internal + '.durable')
+
+ if os.path.isfile(durable_file):
+ got_durable.append(True)
+
+ # Verify that we can decode each individual fragment and that they
+ # are all the correct size
+ fragment_size = ec_policy.fragment_size
+ nfragments = int(
+ math.ceil(float(len(fragment_archives[0])) / fragment_size))
+
+ for fragment_index in range(nfragments):
+ fragment_start = fragment_index * fragment_size
+ fragment_end = (fragment_index + 1) * fragment_size
+
+ try:
+ frags = [fa[fragment_start:fragment_end]
+ for fa in fragment_archives]
+ seg = ec_policy.pyeclib_driver.decode(frags)
+ except ECDriverError:
+ self.fail("Failed to decode fragments %d; this probably "
+ "means the fragments are not the sizes they "
+ "should be" % fragment_index)
+
+ segment_start = fragment_index * segment_size
+ segment_end = (fragment_index + 1) * segment_size
+
+ self.assertEqual(seg, obj[segment_start:segment_end])
+
+ # verify at least 2 puts made it all the way to the end of 2nd
+ # phase, ie at least 2 .durable statuses were written
+ num_durable_puts = sum(d is True for d in got_durable)
+ self.assertTrue(num_durable_puts >= 2)
+
+ @unpatch_policies
+ def test_PUT_ec_object_etag_mismatch(self):
+ self.put_container("ec", "ec-con")
+
+ obj = '90:6A:02:60:B1:08-96da3e706025537fc42464916427727e'
+ prolis = _test_sockets[0]
+ prosrv = _test_servers[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/o3 HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: %s\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5('something else').hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 422'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ # nothing should have made it to disk on the object servers
+ partition, nodes = prosrv.get_object_ring(3).get_nodes(
+ 'a', 'ec-con', 'o3')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+
+ partition, nodes = prosrv.get_object_ring(3).get_nodes(
+ 'a', 'ec-con', 'o3')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ for node in nodes:
+ df = df_mgr.get_diskfile(node['device'], partition,
+ 'a', 'ec-con', 'o3', policy=POLICIES[3])
+ self.assertRaises(DiskFileNotExist, df.open)
+
+ @unpatch_policies
+ def test_PUT_ec_fragment_archive_etag_mismatch(self):
+ self.put_container("ec", "ec-con")
+
+ # Cause a hash mismatch by feeding one particular MD5 hasher some
+ # extra data. The goal here is to get exactly one of the hashers in
+ # an object server.
+ countdown = [1]
+
+ def busted_md5_constructor(initial_str=""):
+ hasher = md5(initial_str)
+ if countdown[0] == 0:
+ hasher.update('wrong')
+ countdown[0] -= 1
+ return hasher
+
+ obj = 'uvarovite-esurience-cerated-symphysic'
+ prolis = _test_sockets[0]
+ prosrv = _test_servers[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ with mock.patch('swift.obj.server.md5', busted_md5_constructor):
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/pimento HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: %s\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 503' # no quorum
+ self.assertEqual(headers[:len(exp)], exp)
+
+ # 2/3 of the fragment archives should have landed on disk
+ partition, nodes = prosrv.get_object_ring(3).get_nodes(
+ 'a', 'ec-con', 'pimento')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+
+ partition, nodes = prosrv.get_object_ring(3).get_nodes(
+ 'a', 'ec-con', 'pimento')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ found = 0
+ for node in nodes:
+ df = df_mgr.get_diskfile(node['device'], partition,
+ 'a', 'ec-con', 'pimento',
+ policy=POLICIES[3])
+ try:
+ df.open()
+ found += 1
+ except DiskFileNotExist:
+ pass
+ self.assertEqual(found, 2)
+
+ @unpatch_policies
+ def test_PUT_ec_if_none_match(self):
+ self.put_container("ec", "ec-con")
+
+ obj = 'ananepionic-lepidophyllous-ropewalker-neglectful'
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: "%s"\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'If-None-Match: *\r\n'
+ 'Etag: "%s"\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 412'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ @unpatch_policies
+ def test_GET_ec(self):
+ self.put_container("ec", "ec-con")
+
+ obj = '0123456' * 11 * 17
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Object-Meta-Color: chartreuse\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+ self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
+
+ gotten_obj = ''
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+ self.assertEqual(gotten_obj, obj)
+
+ @unpatch_policies
+ def test_conditional_GET_ec(self):
+ self.put_container("ec", "ec-con")
+
+ obj = 'this object has an etag and is otherwise unimportant'
+ etag = md5(obj).hexdigest()
+ not_etag = md5(obj + "blahblah").hexdigest()
+
+ prolis = _test_sockets[0]
+ prosrv = _test_servers[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/conditionals HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ for verb in ('GET', 'HEAD'):
+ # If-Match
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-Match': etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-Match': not_etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 412)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-Match': "*"})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+
+ # If-None-Match
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-None-Match': etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 304)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-None-Match': not_etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-None-Match': "*"})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 304)
+
+ @unpatch_policies
+ def test_GET_ec_big(self):
+ self.put_container("ec", "ec-con")
+
+ # our EC segment size is 4 KiB, so this is multiple (3) segments;
+ # we'll verify that with a sanity check
+ obj = 'a moose once bit my sister' * 400
+ self.assertTrue(
+ len(obj) > POLICIES.get_by_name("ec").ec_segment_size * 2,
+ "object is too small for proper testing")
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+
+ gotten_obj = ''
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+ # This may look like a redundant test, but when things fail, this
+ # has a useful failure message while the subsequent one spews piles
+ # of garbage and demolishes your terminal's scrollback buffer.
+ self.assertEqual(len(gotten_obj), len(obj))
+ self.assertEqual(gotten_obj, obj)
+
+ @unpatch_policies
+ def test_GET_ec_failure_handling(self):
+ self.put_container("ec", "ec-con")
+
+ obj = 'look at this object; it is simply amazing ' * 500
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ def explodey_iter(inner_iter):
+ yield next(inner_iter)
+ raise Exception("doom ba doom")
+
+ real_ec_app_iter = swift.proxy.controllers.obj.ECAppIter
+
+ def explodey_ec_app_iter(path, policy, iterators, *a, **kw):
+ # Each thing in `iterators` here is a document-parts iterator,
+ # and we want to fail after getting a little into each part.
+ #
+ # That way, we ensure we've started streaming the response to
+ # the client when things go wrong.
+ return real_ec_app_iter(
+ path, policy,
+ [explodey_iter(i) for i in iterators],
+ *a, **kw)
+
+ with mock.patch("swift.proxy.controllers.obj.ECAppIter",
+ explodey_ec_app_iter):
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+
+ gotten_obj = ''
+ try:
+ with Timeout(300): # don't hang the testrun when this fails
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+ except Timeout:
+ self.fail("GET hung when connection failed")
+
+ # Ensure we failed partway through, otherwise the mocks could
+ # get out of date without anyone noticing
+ self.assertTrue(0 < len(gotten_obj) < len(obj))
+
+ @unpatch_policies
+ def test_HEAD_ec(self):
+ self.put_container("ec", "ec-con")
+
+ obj = '0123456' * 11 * 17
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Object-Meta-Color: chartreuse\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('HEAD /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+ self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
+
+ @unpatch_policies
+ def test_GET_ec_404(self):
+ self.put_container("ec", "ec-con")
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 404'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ @unpatch_policies
+ def test_HEAD_ec_404(self):
+ self.put_container("ec", "ec-con")
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('HEAD /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 404'
+ self.assertEqual(headers[:len(exp)], exp)
+
def test_PUT_expect_header_zero_content_length(self):
test_errors = []
@@ -768,13 +2010,22 @@ class TestObjectController(unittest.TestCase):
'server!')
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- # The (201, -4) tuples in there have the effect of letting the
- # initial connect succeed, after which getexpect() gets called and
- # then the -4 makes the response of that actually be 201 instead of
- # 100. Perfectly straightforward.
- set_http_connect(200, 200, (201, -4), (201, -4), (201, -4),
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
+ # The (201, Exception('test')) tuples in there have the effect of
+ # changing the status of the initial expect response. The default
+ # expect response from FakeConn for 201 is 100.
+ # But the object server won't send a 100 continue line if the
+            # client doesn't send an expect 100 header (as is the case with
+ # zero byte PUTs as validated by this test), nevertheless the
+ # object controller calls getexpect without prejudice. In this
+ # case the status from the response shows up early in getexpect
+ # instead of having to wait until getresponse. The Exception is
+ # in there to ensure that the object controller also *uses* the
+ # result of getexpect instead of calling getresponse in which case
+ # our FakeConn will blow up.
+ success_codes = [(201, Exception('test'))] * 3
+ set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
@@ -795,9 +2046,14 @@ class TestObjectController(unittest.TestCase):
'non-zero byte PUT!')
with save_globals():
- controller = \
- proxy_server.ObjectController(self.app, 'a', 'c', 'o.jpg')
- set_http_connect(200, 200, 201, 201, 201,
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
+ # the (100, 201) tuples in there are just being extra explicit
+ # about the FakeConn returning the 100 Continue status when the
+ # object controller calls getexpect. Which is FakeConn's default
+ # for 201 if no expect_status is specified.
+ success_codes = [(100, 201)] * 3
+ set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
@@ -820,12 +2076,14 @@ class TestObjectController(unittest.TestCase):
def is_r0(node):
return node['region'] == 0
- self.app.object_ring.max_more_nodes = 100
+ object_ring = self.app.get_object_ring(None)
+ object_ring.max_more_nodes = 100
self.app.write_affinity_is_local_fn = is_r0
self.app.write_affinity_node_count = lambda r: 3
controller = \
- proxy_server.ObjectController(self.app, 'a', 'c', 'o.jpg')
+ ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
set_http_connect(200, 200, 201, 201, 201,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
@@ -854,14 +2112,16 @@ class TestObjectController(unittest.TestCase):
def is_r0(node):
return node['region'] == 0
- self.app.object_ring.max_more_nodes = 100
+ object_ring = self.app.get_object_ring(None)
+ object_ring.max_more_nodes = 100
self.app.write_affinity_is_local_fn = is_r0
self.app.write_affinity_node_count = lambda r: 3
controller = \
- proxy_server.ObjectController(self.app, 'a', 'c', 'o.jpg')
+ ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
self.app.error_limit(
- self.app.object_ring.get_part_nodes(1)[0], 'test')
+ object_ring.get_part_nodes(1)[0], 'test')
set_http_connect(200, 200, # account, container
201, 201, 201, # 3 working backends
give_connect=test_connect)
@@ -880,6 +2140,28 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(0, written_to[1][1] % 2)
self.assertNotEqual(0, written_to[2][1] % 2)
+ @unpatch_policies
+ def test_PUT_no_etag_fallocate(self):
+ with mock.patch('swift.obj.diskfile.fallocate') as mock_fallocate:
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ obj = 'hemoleucocytic-surfactant'
+ fd.write('PUT /v1/a/c/o HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+ # one for each obj server; this test has 2
+ self.assertEqual(len(mock_fallocate.mock_calls), 2)
+
+ @unpatch_policies
def test_PUT_message_length_using_content_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -897,6 +2179,7 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
+ @unpatch_policies
def test_PUT_message_length_using_transfer_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -929,6 +2212,7 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
+ @unpatch_policies
def test_PUT_message_length_using_both(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -962,6 +2246,7 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
+ @unpatch_policies
def test_PUT_bad_message_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -995,6 +2280,7 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 400'
self.assertEqual(headers[:len(exp)], exp)
+ @unpatch_policies
def test_PUT_message_length_unsup_xfr_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -1028,9 +2314,9 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 501'
self.assertEqual(headers[:len(exp)], exp)
+ @unpatch_policies
def test_PUT_message_length_too_large(self):
- swift.proxy.controllers.obj.MAX_FILE_SIZE = 10
- try:
+ with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
@@ -1045,9 +2331,8 @@ class TestObjectController(unittest.TestCase):
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 413'
self.assertEqual(headers[:len(exp)], exp)
- finally:
- swift.proxy.controllers.obj.MAX_FILE_SIZE = MAX_FILE_SIZE
+ @unpatch_policies
def test_PUT_last_modified(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -1107,7 +2392,7 @@ class TestObjectController(unittest.TestCase):
if 'x-if-delete-at' in headers or 'X-If-Delete-At' in headers:
test_errors.append('X-If-Delete-At in headers')
- body = simplejson.dumps(
+ body = json.dumps(
[{"name": "001o/1",
"hash": "x",
"bytes": 0,
@@ -1115,11 +2400,12 @@ class TestObjectController(unittest.TestCase):
"last_modified": "1970-01-01T00:00:01.000000"}])
body_iter = ('', '', body, '', '', '', '', '', '', '', '', '', '', '')
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
# HEAD HEAD GET GET HEAD GET GET GET PUT PUT
# PUT DEL DEL DEL
set_http_connect(200, 200, 200, 200, 200, 200, 200, 200, 201, 201,
- 201, 200, 200, 200,
+ 201, 204, 204, 204,
give_connect=test_connect,
body_iter=body_iter,
headers={'x-versions-location': 'foo'})
@@ -1131,10 +2417,133 @@ class TestObjectController(unittest.TestCase):
controller.DELETE(req)
self.assertEquals(test_errors, [])
+ @patch_policies([
+ StoragePolicy(0, 'zero', False, object_ring=FakeRing()),
+ StoragePolicy(1, 'one', True, object_ring=FakeRing())
+ ])
+ def test_DELETE_on_expired_versioned_object(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
+ methods = set()
+ authorize_call_count = [0]
+
+ def test_connect(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ methods.add((method, path))
+
+ def fake_container_info(account, container, req):
+ return {'status': 200, 'sync_key': None,
+ 'meta': {}, 'cors': {'allow_origin': None,
+ 'expose_headers': None,
+ 'max_age': None},
+ 'sysmeta': {}, 'read_acl': None, 'object_count': None,
+ 'write_acl': None, 'versions': 'foo',
+ 'partition': 1, 'bytes': None, 'storage_policy': '1',
+ 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0,
+ 'id': 0, 'device': 'sda', 'port': 1000},
+ {'zone': 1, 'ip': '10.0.0.1', 'region': 1,
+ 'id': 1, 'device': 'sdb', 'port': 1001},
+ {'zone': 2, 'ip': '10.0.0.2', 'region': 0,
+ 'id': 2, 'device': 'sdc', 'port': 1002}]}
+
+ def fake_list_iter(container, prefix, env):
+ object_list = [{'name': '1'}, {'name': '2'}, {'name': '3'}]
+ for obj in object_list:
+ yield obj
+
+ def fake_authorize(req):
+ authorize_call_count[0] += 1
+ return None # allow the request
+
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
+ controller.container_info = fake_container_info
+ controller._listing_iter = fake_list_iter
+ set_http_connect(404, 404, 404, # get for the previous version
+ 200, 200, 200, # get for the pre-previous
+ 201, 201, 201, # put move the pre-previous
+ 204, 204, 204, # delete for the pre-previous
+ give_connect=test_connect)
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'DELETE',
+ 'swift.authorize': fake_authorize})
+
+ self.app.memcache.store = {}
+ self.app.update_request(req)
+ controller.DELETE(req)
+ exp_methods = [('GET', '/a/foo/3'),
+ ('GET', '/a/foo/2'),
+ ('PUT', '/a/c/o'),
+ ('DELETE', '/a/foo/2')]
+ self.assertEquals(set(exp_methods), (methods))
+ self.assertEquals(authorize_call_count[0], 2)
+
+ @patch_policies([
+ StoragePolicy(0, 'zero', False, object_ring=FakeRing()),
+ StoragePolicy(1, 'one', True, object_ring=FakeRing())
+ ])
+ def test_denied_DELETE_of_versioned_object(self):
+ """
+ Verify that a request with read access to a versions container
+ is unable to cause any write operations on the versioned container.
+ """
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
+ methods = set()
+ authorize_call_count = [0]
+
+ def test_connect(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ methods.add((method, path))
+
+ def fake_container_info(account, container, req):
+ return {'status': 200, 'sync_key': None,
+ 'meta': {}, 'cors': {'allow_origin': None,
+ 'expose_headers': None,
+ 'max_age': None},
+ 'sysmeta': {}, 'read_acl': None, 'object_count': None,
+ 'write_acl': None, 'versions': 'foo',
+ 'partition': 1, 'bytes': None, 'storage_policy': '1',
+ 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0,
+ 'id': 0, 'device': 'sda', 'port': 1000},
+ {'zone': 1, 'ip': '10.0.0.1', 'region': 1,
+ 'id': 1, 'device': 'sdb', 'port': 1001},
+ {'zone': 2, 'ip': '10.0.0.2', 'region': 0,
+ 'id': 2, 'device': 'sdc', 'port': 1002}]}
+
+ def fake_list_iter(container, prefix, env):
+ object_list = [{'name': '1'}, {'name': '2'}, {'name': '3'}]
+ for obj in object_list:
+ yield obj
+
+ def fake_authorize(req):
+ # deny write access
+ authorize_call_count[0] += 1
+                return HTTPForbidden(req)  # deny the request
+
+ with save_globals():
+ controller = ReplicatedObjectController(self.app, 'a', 'c', 'o')
+ controller.container_info = fake_container_info
+ # patching _listing_iter simulates request being authorized
+ # to list versions container
+ controller._listing_iter = fake_list_iter
+ set_http_connect(give_connect=test_connect)
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'DELETE',
+ 'swift.authorize': fake_authorize})
+
+ self.app.memcache.store = {}
+ self.app.update_request(req)
+ resp = controller.DELETE(req)
+ self.assertEqual(403, resp.status_int)
+ self.assertFalse(methods, methods)
+ self.assertEquals(authorize_call_count[0], 1)
+
def test_PUT_auto_content_type(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_content_type(filename, expected):
# The three responses here are for account_info() (HEAD to
@@ -1170,8 +2579,7 @@ class TestObjectController(unittest.TestCase):
fp.write('foo/bar foo\n')
proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
- FakeRing(), FakeRing(),
- FakeRing())
+ FakeRing(), FakeRing())
self.assertEquals(proxy_server.mimetypes.guess_type('blah.foo')[0],
'foo/bar')
self.assertEquals(proxy_server.mimetypes.guess_type('blah.jpg')[0],
@@ -1181,8 +2589,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
@@ -1197,11 +2605,12 @@ class TestObjectController(unittest.TestCase):
test_status_map((200, 200, 201, 201, 500), 201)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 200, 204, 500, 404), 503)
+ test_status_map((200, 200, 202, 202, 204), 204)
def test_PUT_connect_exceptions(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
@@ -1209,19 +2618,30 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
- res = controller.PUT(req)
+ try:
+ res = controller.PUT(req)
+ except HTTPException as res:
+ pass
expected = str(expected)
self.assertEquals(res.status[:len(expected)], expected)
- test_status_map((200, 200, 201, 201, -1), 201)
- test_status_map((200, 200, 201, 201, -2), 201) # expect timeout
- test_status_map((200, 200, 201, 201, -3), 201) # error limited
- test_status_map((200, 200, 201, -1, -1), 503)
- test_status_map((200, 200, 503, 503, -1), 503)
+ test_status_map((200, 200, 201, 201, -1), 201) # connect exc
+ # connect errors
+ test_status_map((200, 200, Timeout(), 201, 201, ), 201)
+ test_status_map((200, 200, 201, 201, Exception()), 201)
+ # expect errors
+ test_status_map((200, 200, (Timeout(), None), 201, 201), 201)
+ test_status_map((200, 200, (Exception(), None), 201, 201), 201)
+ # response errors
+ test_status_map((200, 200, (100, Timeout()), 201, 201), 201)
+ test_status_map((200, 200, (100, Exception()), 201, 201), 201)
+ test_status_map((200, 200, 507, 201, 201), 201) # error limited
+ test_status_map((200, 200, -1, 201, -1), 503)
+ test_status_map((200, 200, 503, -1, 503), 503)
def test_PUT_send_exceptions(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
@@ -1230,7 +2650,10 @@ class TestObjectController(unittest.TestCase):
environ={'REQUEST_METHOD': 'PUT'},
body='some data')
self.app.update_request(req)
- res = controller.PUT(req)
+ try:
+ res = controller.PUT(req)
+ except HTTPException as res:
+ pass
expected = str(expected)
self.assertEquals(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, -1, 201), 201)
@@ -1240,10 +2663,10 @@ class TestObjectController(unittest.TestCase):
def test_PUT_max_size(self):
with save_globals():
set_http_connect(201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
- 'Content-Length': str(MAX_FILE_SIZE + 1),
+ 'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = controller.PUT(req)
@@ -1252,8 +2675,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_bad_content_type(self):
with save_globals():
set_http_connect(201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': 0, 'Content-Type': 'foo/bar;swift_hey=45'})
self.app.update_request(req)
@@ -1263,8 +2686,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_getresponse_exceptions(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
@@ -1272,7 +2695,10 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
- res = controller.PUT(req)
+ try:
+ res = controller.PUT(req)
+ except HTTPException as res:
+ pass
expected = str(expected)
self.assertEquals(res.status[:len(str(expected))],
str(expected))
@@ -1301,6 +2727,137 @@ class TestObjectController(unittest.TestCase):
test_status_map((200, 200, 404, 500, 500), 503)
test_status_map((200, 200, 404, 404, 404), 404)
+ @patch_policies([
+ StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
+ StoragePolicy(1, 'one', object_ring=FakeRing()),
+ ])
+ def test_POST_backend_headers(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
+ self.app.object_post_as_copy = False
+ self.app.sort_nodes = lambda nodes: nodes
+ backend_requests = []
+
+ def capture_requests(ip, port, method, path, headers, *args,
+ **kwargs):
+ backend_requests.append((method, path, headers))
+
+ req = Request.blank('/v1/a/c/o', {}, method='POST',
+ headers={'X-Object-Meta-Color': 'Blue'})
+
+ # we want the container_info response to says a policy index of 1
+ resp_headers = {'X-Backend-Storage-Policy-Index': 1}
+ with mocked_http_conn(
+ 200, 200, 202, 202, 202,
+ headers=resp_headers, give_connect=capture_requests
+ ) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+
+ self.assertEqual(resp.status_int, 202)
+ self.assertEqual(len(backend_requests), 5)
+
+ def check_request(req, method, path, headers=None):
+ req_method, req_path, req_headers = req
+ self.assertEqual(method, req_method)
+ # caller can ignore leading path parts
+ self.assertTrue(req_path.endswith(path),
+ 'expected path to end with %s, it was %s' % (
+ path, req_path))
+ headers = headers or {}
+ # caller can ignore some headers
+ for k, v in headers.items():
+ self.assertEqual(req_headers[k], v)
+ account_request = backend_requests.pop(0)
+ check_request(account_request, method='HEAD', path='/sda/0/a')
+ container_request = backend_requests.pop(0)
+ check_request(container_request, method='HEAD', path='/sda/0/a/c')
+ # make sure backend requests included expected container headers
+ container_headers = {}
+ for request in backend_requests:
+ req_headers = request[2]
+ device = req_headers['x-container-device']
+ host = req_headers['x-container-host']
+ container_headers[device] = host
+ expectations = {
+ 'method': 'POST',
+ 'path': '/0/a/c/o',
+ 'headers': {
+ 'X-Container-Partition': '0',
+ 'Connection': 'close',
+ 'User-Agent': 'proxy-server %s' % os.getpid(),
+ 'Host': 'localhost:80',
+ 'Referer': 'POST http://localhost/v1/a/c/o',
+ 'X-Object-Meta-Color': 'Blue',
+ 'X-Backend-Storage-Policy-Index': '1'
+ },
+ }
+ check_request(request, **expectations)
+
+ expected = {}
+ for i, device in enumerate(['sda', 'sdb', 'sdc']):
+ expected[device] = '10.0.0.%d:100%d' % (i, i)
+ self.assertEqual(container_headers, expected)
+
+ # and again with policy override
+ self.app.memcache.store = {}
+ backend_requests = []
+ req = Request.blank('/v1/a/c/o', {}, method='POST',
+ headers={'X-Object-Meta-Color': 'Blue',
+ 'X-Backend-Storage-Policy-Index': 0})
+ with mocked_http_conn(
+ 200, 200, 202, 202, 202,
+ headers=resp_headers, give_connect=capture_requests
+ ) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ self.assertEqual(resp.status_int, 202)
+ self.assertEqual(len(backend_requests), 5)
+ for request in backend_requests[2:]:
+ expectations = {
+ 'method': 'POST',
+ 'path': '/0/a/c/o', # ignore device bit
+ 'headers': {
+ 'X-Object-Meta-Color': 'Blue',
+ 'X-Backend-Storage-Policy-Index': '0',
+ }
+ }
+ check_request(request, **expectations)
+
+ # and this time with post as copy
+ self.app.object_post_as_copy = True
+ self.app.memcache.store = {}
+ backend_requests = []
+ req = Request.blank('/v1/a/c/o', {}, method='POST',
+ headers={'X-Object-Meta-Color': 'Blue',
+ 'X-Backend-Storage-Policy-Index': 0})
+ with mocked_http_conn(
+ 200, 200, 200, 200, 200, 201, 201, 201,
+ headers=resp_headers, give_connect=capture_requests
+ ) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ self.assertEqual(resp.status_int, 202)
+ self.assertEqual(len(backend_requests), 8)
+ policy0 = {'X-Backend-Storage-Policy-Index': '0'}
+ policy1 = {'X-Backend-Storage-Policy-Index': '1'}
+ expected = [
+ # account info
+ {'method': 'HEAD', 'path': '/0/a'},
+ # container info
+ {'method': 'HEAD', 'path': '/0/a/c'},
+ # x-newests
+ {'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
+ {'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
+ {'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
+ # new writes
+ {'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
+ {'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
+ {'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
+ ]
+ for request, expectations in zip(backend_requests, expected):
+ check_request(request, **expectations)
+
def test_POST_as_copy(self):
with save_globals():
def test_status_map(statuses, expected):
@@ -1333,9 +2890,9 @@ class TestObjectController(unittest.TestCase):
test_status_map((200, 200, 204, 204, 204), 204)
test_status_map((200, 200, 204, 204, 500), 204)
test_status_map((200, 200, 204, 404, 404), 404)
- test_status_map((200, 200, 204, 500, 404), 503)
+ test_status_map((200, 204, 500, 500, 404), 503)
test_status_map((200, 200, 404, 404, 404), 404)
- test_status_map((200, 200, 404, 404, 500), 404)
+ test_status_map((200, 200, 400, 400, 400), 400)
def test_HEAD(self):
with save_globals():
@@ -1388,6 +2945,8 @@ class TestObjectController(unittest.TestCase):
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
+ test_status_map((200, 200, 404, 404, 200), 200, ('0', '0', None,
+ None, '1'), '1')
def test_GET_newest(self):
with save_globals():
@@ -1443,10 +3002,10 @@ class TestObjectController(unittest.TestCase):
def test_POST_meta_val_len(self):
with save_globals():
- limit = MAX_META_VALUE_LENGTH
+ limit = constraints.MAX_META_VALUE_LENGTH
self.app.object_post_as_copy = False
- proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
@@ -1466,7 +3025,7 @@ class TestObjectController(unittest.TestCase):
def test_POST_as_copy_meta_val_len(self):
with save_globals():
- limit = MAX_META_VALUE_LENGTH
+ limit = constraints.MAX_META_VALUE_LENGTH
set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
# acct cont objc objc objc obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
@@ -1486,7 +3045,7 @@ class TestObjectController(unittest.TestCase):
def test_POST_meta_key_len(self):
with save_globals():
- limit = MAX_META_NAME_LENGTH
+ limit = constraints.MAX_META_NAME_LENGTH
self.app.object_post_as_copy = False
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
@@ -1508,7 +3067,7 @@ class TestObjectController(unittest.TestCase):
def test_POST_as_copy_meta_key_len(self):
with save_globals():
- limit = MAX_META_NAME_LENGTH
+ limit = constraints.MAX_META_NAME_LENGTH
set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
# acct cont objc objc objc obj obj obj
req = Request.blank(
@@ -1529,7 +3088,7 @@ class TestObjectController(unittest.TestCase):
def test_POST_meta_count(self):
with save_globals():
- limit = MAX_META_COUNT
+ limit = constraints.MAX_META_COUNT
headers = dict(
(('X-Object-Meta-' + str(i), 'a') for i in xrange(limit + 1)))
headers.update({'Content-Type': 'foo/bar'})
@@ -1542,7 +3101,7 @@ class TestObjectController(unittest.TestCase):
def test_POST_meta_size(self):
with save_globals():
- limit = MAX_META_OVERALL_SIZE
+ limit = constraints.MAX_META_OVERALL_SIZE
count = limit / 256 # enough to cause the limit to be reached
headers = dict(
(('X-Object-Meta-' + str(i), 'a' * 256)
@@ -1601,15 +3160,16 @@ class TestObjectController(unittest.TestCase):
def test_client_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
- for dev in self.app.container_ring.devs.values():
+ for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
- self.app.object_ring.get_nodes('account')
- for dev in self.app.object_ring.devs.values():
+ object_ring = self.app.get_object_ring(None)
+ object_ring.get_nodes('account')
+ for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
@@ -1635,7 +3195,7 @@ class TestObjectController(unittest.TestCase):
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
- self.app.client_timeout = 0.1
+ self.app.client_timeout = 0.05
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
@@ -1650,15 +3210,16 @@ class TestObjectController(unittest.TestCase):
def test_client_disconnect(self):
with save_globals():
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
- for dev in self.app.container_ring.devs.values():
+ for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
- self.app.object_ring.get_nodes('account')
- for dev in self.app.object_ring.devs.values():
+ object_ring = self.app.get_object_ring(None)
+ object_ring.get_nodes('account')
+ for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
@@ -1684,15 +3245,16 @@ class TestObjectController(unittest.TestCase):
def test_node_read_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
- for dev in self.app.container_ring.devs.values():
+ for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
- self.app.object_ring.get_nodes('account')
- for dev in self.app.object_ring.devs.values():
+ object_ring = self.app.get_object_ring(None)
+ object_ring.get_nodes('account')
+ for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
@@ -1718,6 +3280,19 @@ class TestObjectController(unittest.TestCase):
def test_node_read_timeout_retry(self):
with save_globals():
+ self.app.account_ring.get_nodes('account')
+ for dev in self.app.account_ring.devs:
+ dev['ip'] = '127.0.0.1'
+ dev['port'] = 1
+ self.app.container_ring.get_nodes('account')
+ for dev in self.app.container_ring.devs:
+ dev['ip'] = '127.0.0.1'
+ dev['port'] = 1
+ object_ring = self.app.get_object_ring(None)
+ object_ring.get_nodes('account')
+ for dev in object_ring.devs:
+ dev['ip'] = '127.0.0.1'
+ dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
@@ -1775,15 +3350,16 @@ class TestObjectController(unittest.TestCase):
def test_node_write_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
- for dev in self.app.container_ring.devs.values():
+ for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
- self.app.object_ring.get_nodes('account')
- for dev in self.app.object_ring.devs.values():
+ object_ring = self.app.get_object_ring(None)
+ object_ring.get_nodes('account')
+ for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o',
@@ -1806,87 +3382,149 @@ class TestObjectController(unittest.TestCase):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 503)
+ def test_node_request_setting(self):
+ baseapp = proxy_server.Application({'request_node_count': '3'},
+ FakeMemcache(),
+ container_ring=FakeRing(),
+ account_ring=FakeRing())
+ self.assertEquals(baseapp.request_node_count(3), 3)
+
def test_iter_nodes(self):
with save_globals():
try:
- self.app.object_ring.max_more_nodes = 2
- partition, nodes = self.app.object_ring.get_nodes('account',
- 'container',
- 'object')
+ object_ring = self.app.get_object_ring(None)
+ object_ring.max_more_nodes = 2
+ partition, nodes = object_ring.get_nodes('account',
+ 'container',
+ 'object')
collected_nodes = []
- for node in self.app.iter_nodes(self.app.object_ring,
+ for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEquals(len(collected_nodes), 5)
- self.app.object_ring.max_more_nodes = 20
+ object_ring.max_more_nodes = 20
self.app.request_node_count = lambda r: 20
- partition, nodes = self.app.object_ring.get_nodes('account',
- 'container',
- 'object')
+ partition, nodes = object_ring.get_nodes('account',
+ 'container',
+ 'object')
collected_nodes = []
- for node in self.app.iter_nodes(self.app.object_ring,
+ for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEquals(len(collected_nodes), 9)
+ # zero error-limited primary nodes -> no handoff warnings
self.app.log_handoffs = True
self.app.logger = FakeLogger()
- self.app.object_ring.max_more_nodes = 2
- partition, nodes = self.app.object_ring.get_nodes('account',
- 'container',
- 'object')
+ self.app.request_node_count = lambda r: 7
+ object_ring.max_more_nodes = 20
+ partition, nodes = object_ring.get_nodes('account',
+ 'container',
+ 'object')
collected_nodes = []
- for node in self.app.iter_nodes(self.app.object_ring,
- partition):
+ for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
- self.assertEquals(len(collected_nodes), 5)
- self.assertEquals(
- self.app.logger.log_dict['warning'],
- [(('Handoff requested (1)',), {}),
- (('Handoff requested (2)',), {})])
+ self.assertEquals(len(collected_nodes), 7)
+ self.assertEquals(self.app.logger.log_dict['warning'], [])
+ self.assertEquals(self.app.logger.get_increments(), [])
- self.app.log_handoffs = False
+ # one error-limited primary node -> one handoff warning
+ self.app.log_handoffs = True
self.app.logger = FakeLogger()
- self.app.object_ring.max_more_nodes = 2
- partition, nodes = self.app.object_ring.get_nodes('account',
- 'container',
- 'object')
+ self.app.request_node_count = lambda r: 7
+ self.app._error_limiting = {} # clear out errors
+ set_node_errors(self.app, object_ring._devs[0], 999,
+ last_error=(2 ** 63 - 1))
+
collected_nodes = []
- for node in self.app.iter_nodes(self.app.object_ring,
- partition):
+ for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
- self.assertEquals(len(collected_nodes), 5)
- self.assertEquals(self.app.logger.log_dict['warning'], [])
+ self.assertEquals(len(collected_nodes), 7)
+ self.assertEquals(self.app.logger.log_dict['warning'], [
+ (('Handoff requested (5)',), {})])
+ self.assertEquals(self.app.logger.get_increments(),
+ ['handoff_count'])
+
+ # two error-limited primary nodes -> two handoff warnings
+ self.app.log_handoffs = True
+ self.app.logger = FakeLogger()
+ self.app.request_node_count = lambda r: 7
+ self.app._error_limiting = {} # clear out errors
+ for i in range(2):
+ set_node_errors(self.app, object_ring._devs[i], 999,
+ last_error=(2 ** 63 - 1))
+
+ collected_nodes = []
+ for node in self.app.iter_nodes(object_ring, partition):
+ collected_nodes.append(node)
+ self.assertEquals(len(collected_nodes), 7)
+ self.assertEquals(self.app.logger.log_dict['warning'], [
+ (('Handoff requested (5)',), {}),
+ (('Handoff requested (6)',), {})])
+ self.assertEquals(self.app.logger.get_increments(),
+ ['handoff_count',
+ 'handoff_count'])
+
+ # all error-limited primary nodes -> four handoff warnings,
+ # plus a handoff-all metric
+ self.app.log_handoffs = True
+ self.app.logger = FakeLogger()
+ self.app.request_node_count = lambda r: 10
+ object_ring.set_replicas(4) # otherwise we run out of handoffs
+ self.app._error_limiting = {} # clear out errors
+ for i in range(4):
+ set_node_errors(self.app, object_ring._devs[i], 999,
+ last_error=(2 ** 63 - 1))
+
+ collected_nodes = []
+ for node in self.app.iter_nodes(object_ring, partition):
+ collected_nodes.append(node)
+ self.assertEquals(len(collected_nodes), 10)
+ self.assertEquals(self.app.logger.log_dict['warning'], [
+ (('Handoff requested (7)',), {}),
+ (('Handoff requested (8)',), {}),
+ (('Handoff requested (9)',), {}),
+ (('Handoff requested (10)',), {})])
+ self.assertEquals(self.app.logger.get_increments(),
+ ['handoff_count',
+ 'handoff_count',
+ 'handoff_count',
+ 'handoff_count',
+ 'handoff_all_count'])
+
finally:
- self.app.object_ring.max_more_nodes = 0
+ object_ring.max_more_nodes = 0
def test_iter_nodes_calls_sort_nodes(self):
with mock.patch.object(self.app, 'sort_nodes') as sort_nodes:
- for node in self.app.iter_nodes(self.app.object_ring, 0):
+ object_ring = self.app.get_object_ring(None)
+ for node in self.app.iter_nodes(object_ring, 0):
pass
sort_nodes.assert_called_once_with(
- self.app.object_ring.get_part_nodes(0))
+ object_ring.get_part_nodes(0))
def test_iter_nodes_skips_error_limited(self):
with mock.patch.object(self.app, 'sort_nodes', lambda n: n):
- first_nodes = list(self.app.iter_nodes(self.app.object_ring, 0))
- second_nodes = list(self.app.iter_nodes(self.app.object_ring, 0))
+ object_ring = self.app.get_object_ring(None)
+ first_nodes = list(self.app.iter_nodes(object_ring, 0))
+ second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertTrue(first_nodes[0] in second_nodes)
self.app.error_limit(first_nodes[0], 'test')
- second_nodes = list(self.app.iter_nodes(self.app.object_ring, 0))
+ second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertTrue(first_nodes[0] not in second_nodes)
def test_iter_nodes_gives_extra_if_error_limited_inline(self):
+ object_ring = self.app.get_object_ring(None)
with nested(
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 6),
- mock.patch.object(self.app.object_ring, 'max_more_nodes', 99)):
- first_nodes = list(self.app.iter_nodes(self.app.object_ring, 0))
+ mock.patch.object(object_ring, 'max_more_nodes', 99)):
+ first_nodes = list(self.app.iter_nodes(object_ring, 0))
second_nodes = []
- for node in self.app.iter_nodes(self.app.object_ring, 0):
+ for node in self.app.iter_nodes(object_ring, 0):
if not second_nodes:
self.app.error_limit(node, 'test')
second_nodes.append(node)
@@ -1894,12 +3532,14 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(len(second_nodes), 7)
def test_iter_nodes_with_custom_node_iter(self):
- node_list = [dict(id=n) for n in xrange(10)]
+ object_ring = self.app.get_object_ring(None)
+ node_list = [dict(id=n, ip='1.2.3.4', port=n, device='D')
+ for n in xrange(10)]
with nested(
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 3)):
- got_nodes = list(self.app.iter_nodes(self.app.object_ring, 0,
+ got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list[:3], got_nodes)
@@ -1907,13 +3547,13 @@ class TestObjectController(unittest.TestCase):
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 1000000)):
- got_nodes = list(self.app.iter_nodes(self.app.object_ring, 0,
+ got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list, got_nodes)
def test_best_response_sets_headers(self):
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object', headers=[{'X-Test': '1'},
@@ -1922,8 +3562,8 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.headers['X-Test'], '1')
def test_best_response_sets_etag(self):
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object')
@@ -1956,8 +3596,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertEquals(resp.status_int, 200)
@@ -1969,21 +3609,26 @@ class TestObjectController(unittest.TestCase):
def test_error_limiting(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
+ object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
- self.assertEquals(controller.app.object_ring.devs[0]['errors'], 2)
- self.assert_('last_error' in controller.app.object_ring.devs[0])
+ self.assertEquals(
+ node_error_count(controller.app, object_ring.devs[0]), 2)
+ self.assert_(node_last_error(controller.app, object_ring.devs[0])
+ is not None)
for _junk in xrange(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
- self.assertEquals(controller.app.object_ring.devs[0]['errors'],
- self.app.error_suppression_limit + 1)
+ self.assertEquals(
+ node_error_count(controller.app, object_ring.devs[0]),
+ self.app.error_suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
- self.assert_('last_error' in controller.app.object_ring.devs[0])
+ self.assert_(node_last_error(controller.app, object_ring.devs[0])
+ is not None)
self.assert_status_map(controller.PUT, (200, 200, 200, 201, 201,
201), 503)
self.assert_status_map(controller.POST,
@@ -1999,17 +3644,78 @@ class TestObjectController(unittest.TestCase):
(200, 200, 200, 204, 204, 204), 503,
raise_exc=True)
+ def test_error_limiting_survives_ring_reload(self):
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
+ controller.app.sort_nodes = lambda l: l
+ object_ring = controller.app.get_object_ring(None)
+ self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
+ 200)
+ self.assertEquals(
+ node_error_count(controller.app, object_ring.devs[0]), 2)
+ self.assert_(node_last_error(controller.app, object_ring.devs[0])
+ is not None)
+ for _junk in xrange(self.app.error_suppression_limit):
+ self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
+ 503), 503)
+ self.assertEquals(
+ node_error_count(controller.app, object_ring.devs[0]),
+ self.app.error_suppression_limit + 1)
+
+ # wipe out any state in the ring
+ for policy in POLICIES:
+ policy.object_ring = FakeRing(base_port=3000)
+
+ # and we still get an error, which proves that the
+ # error-limiting info survived a ring reload
+ self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
+ 503)
+
+ def test_PUT_error_limiting(self):
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
+ controller.app.sort_nodes = lambda l: l
+ object_ring = controller.app.get_object_ring(None)
+ # acc con obj obj obj
+ self.assert_status_map(controller.PUT, (200, 200, 503, 200, 200),
+ 200)
+
+ # 2, not 1, because assert_status_map() calls the method twice
+ odevs = object_ring.devs
+ self.assertEquals(node_error_count(controller.app, odevs[0]), 2)
+ self.assertEquals(node_error_count(controller.app, odevs[1]), 0)
+ self.assertEquals(node_error_count(controller.app, odevs[2]), 0)
+ self.assert_(node_last_error(controller.app, odevs[0]) is not None)
+ self.assert_(node_last_error(controller.app, odevs[1]) is None)
+ self.assert_(node_last_error(controller.app, odevs[2]) is None)
+
+ def test_PUT_error_limiting_last_node(self):
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
+ controller.app.sort_nodes = lambda l: l
+ object_ring = controller.app.get_object_ring(None)
+ # acc con obj obj obj
+ self.assert_status_map(controller.PUT, (200, 200, 200, 200, 503),
+ 200)
+
+ # 2, not 1, because assert_status_map() calls the method twice
+ odevs = object_ring.devs
+ self.assertEquals(node_error_count(controller.app, odevs[0]), 0)
+ self.assertEquals(node_error_count(controller.app, odevs[1]), 0)
+ self.assertEquals(node_error_count(controller.app, odevs[2]), 2)
+ self.assert_(node_last_error(controller.app, odevs[0]) is None)
+ self.assert_(node_last_error(controller.app, odevs[1]) is None)
+ self.assert_(node_last_error(controller.app, odevs[2]) is not None)
+
def test_acc_or_con_missing_returns_404(self):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
- for dev in self.app.account_ring.devs.values():
- del dev['errors']
- del dev['last_error']
- for dev in self.app.container_ring.devs.values():
- del dev['errors']
- del dev['last_error']
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ self.app._error_limiting = {}
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
@@ -2073,9 +3779,10 @@ class TestObjectController(unittest.TestCase):
resp = getattr(controller, 'DELETE')(req)
self.assertEquals(resp.status_int, 404)
- for dev in self.app.account_ring.devs.values():
- dev['errors'] = self.app.error_suppression_limit + 1
- dev['last_error'] = time.time()
+ for dev in self.app.account_ring.devs:
+ set_node_errors(
+ self.app, dev, self.app.error_suppression_limit + 1,
+ time.time())
set_http_connect(200)
# acct [isn't actually called since everything
# is error limited]
@@ -2085,11 +3792,12 @@ class TestObjectController(unittest.TestCase):
resp = getattr(controller, 'DELETE')(req)
self.assertEquals(resp.status_int, 404)
- for dev in self.app.account_ring.devs.values():
- dev['errors'] = 0
- for dev in self.app.container_ring.devs.values():
- dev['errors'] = self.app.error_suppression_limit + 1
- dev['last_error'] = time.time()
+ for dev in self.app.account_ring.devs:
+ set_node_errors(self.app, dev, 0, last_error=None)
+ for dev in self.app.container_ring.devs:
+ set_node_errors(self.app, dev,
+ self.app.error_suppression_limit + 1,
+ time.time())
set_http_connect(200, 200)
# acct cont [isn't actually called since
# everything is error limited]
@@ -2103,8 +3811,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
self.app.object_post_as_copy = False
self.app.memcache = FakeMemcacheReturnsNone()
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
@@ -2124,8 +3832,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_POST_as_copy_requires_container_exist(self):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
@@ -2142,8 +3850,8 @@ class TestObjectController(unittest.TestCase):
def test_bad_metadata(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
@@ -2153,18 +3861,21 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
- req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
- headers={'Content-Length': '0',
- 'X-Object-Meta-' + ('a' *
- MAX_META_NAME_LENGTH): 'v'})
+ req = Request.blank(
+ '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Object-Meta-' + (
+ 'a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
- req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
- headers={'Content-Length': '0',
- 'X-Object-Meta-' + ('a' *
- (MAX_META_NAME_LENGTH + 1)): 'v'})
+ req = Request.blank(
+ '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={
+ 'Content-Length': '0',
+ 'X-Object-Meta-' + (
+ 'a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 400)
@@ -2173,22 +3884,23 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
- MAX_META_VALUE_LENGTH})
+ constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
- req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
- headers={'Content-Length': '0',
- 'X-Object-Meta-Too-Long': 'a' *
- (MAX_META_VALUE_LENGTH + 1)})
+ req = Request.blank(
+ '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Object-Meta-Too-Long': 'a' *
+ (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
- for x in xrange(MAX_META_COUNT):
+ for x in xrange(constraints.MAX_META_COUNT):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
@@ -2197,7 +3909,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
- for x in xrange(MAX_META_COUNT + 1):
+ for x in xrange(constraints.MAX_META_COUNT + 1):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
@@ -2207,17 +3919,17 @@ class TestObjectController(unittest.TestCase):
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
- header_value = 'a' * MAX_META_VALUE_LENGTH
+ header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
- while size < MAX_META_OVERALL_SIZE - 4 - \
- MAX_META_VALUE_LENGTH:
- size += 4 + MAX_META_VALUE_LENGTH
+ while size < constraints.MAX_META_OVERALL_SIZE - 4 - \
+ constraints.MAX_META_VALUE_LENGTH:
+ size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Object-Meta-%04d' % x] = header_value
x += 1
- if MAX_META_OVERALL_SIZE - size > 1:
+ if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Object-Meta-a'] = \
- 'a' * (MAX_META_OVERALL_SIZE - size - 1)
+ 'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
@@ -2225,7 +3937,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Object-Meta-a'] = \
- 'a' * (MAX_META_OVERALL_SIZE - size)
+ 'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
@@ -2235,8 +3947,8 @@ class TestObjectController(unittest.TestCase):
@contextmanager
def controller_context(self, req, *args, **kwargs):
_v, account, container, obj = utils.split_path(req.path, 4, 4, True)
- controller = proxy_server.ObjectController(self.app, account,
- container, obj)
+ controller = ReplicatedObjectController(
+ self.app, account, container, obj)
self.app.update_request(req)
self.app.memcache.store = {}
with save_globals():
@@ -2263,6 +3975,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ def test_basic_put_with_x_copy_from_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': 'c/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_basic_put_with_x_copy_from_across_container(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2274,6 +3999,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c2/o')
+ def test_basic_put_with_x_copy_from_across_container_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': 'c2/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c2/o')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_copy_non_zero_content_length(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5',
@@ -2284,6 +4022,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 400)
+ def test_copy_non_zero_content_length_with_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '5',
+ 'X-Copy-From': 'c/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200)
+ # acct cont
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 400)
+
def test_copy_with_slashes_in_x_copy_from(self):
# extra source path parsing
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
@@ -2296,6 +4045,20 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ def test_copy_with_slashes_in_x_copy_from_and_account(self):
+ # extra source path parsing
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': 'c/o/o2',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_copy_with_spaces_in_x_copy_from(self):
# space in soure path
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
@@ -2308,6 +4071,20 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o%20o2')
+ def test_copy_with_spaces_in_x_copy_from_and_account(self):
+        # space in source path
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': 'c/o%20o2',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o%20o2')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_copy_with_leading_slash_in_x_copy_from(self):
# repeat tests with leading /
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
@@ -2320,6 +4097,20 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ def test_copy_with_leading_slash_in_x_copy_from_and_account(self):
+ # repeat tests with leading /
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_copy_with_leading_slash_and_slashes_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2331,6 +4122,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ def test_copy_with_leading_slash_and_slashes_in_x_copy_from_acct(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o/o2',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_copy_with_no_object_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2346,6 +4150,22 @@ class TestObjectController(unittest.TestCase):
raise self.fail('Invalid X-Copy-From did not raise '
'client error')
+ def test_copy_with_no_object_in_x_copy_from_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200)
+ # acct cont
+ with self.controller_context(req, *status_list) as controller:
+ try:
+ controller.PUT(req)
+ except HTTPException as resp:
+ self.assertEquals(resp.status_int // 100, 4) # client error
+ else:
+ raise self.fail('Invalid X-Copy-From did not raise '
+ 'client error')
+
def test_copy_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2356,6 +4176,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 503)
+ def test_copy_server_error_reading_source_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 503, 503, 503)
+ # acct cont acct cont objc objc objc
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 503)
+
def test_copy_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2367,6 +4198,18 @@ class TestObjectController(unittest.TestCase):
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 404)
+ def test_copy_not_found_reading_source_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o',
+ 'X-Copy-From-Account': 'a'})
+ # not found
+ status_list = (200, 200, 200, 200, 404, 404, 404)
+ # acct cont acct cont objc objc objc
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 404)
+
def test_copy_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2377,6 +4220,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 201)
+ def test_copy_with_some_missing_sources_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+
def test_copy_with_object_metadata(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2392,16 +4246,33 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEquals(resp.headers.get('x-delete-at'), '9876543210')
+ def test_copy_with_object_metadata_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o',
+ 'X-Object-Meta-Ours': 'okay',
+ 'X-Copy-From-Account': 'a'})
+ # test object metadata
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers.get('x-object-meta-test'), 'testing')
+ self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay')
+ self.assertEquals(resp.headers.get('x-delete-at'), '9876543210')
+
+ @_limit_max_file_size
def test_copy_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
-
# copy-from object is too large to fit in target object
+
class LargeResponseBody(object):
def __len__(self):
- return MAX_FILE_SIZE + 1
+ return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
@@ -2415,7 +4286,10 @@ class TestObjectController(unittest.TestCase):
self.app.update_request(req)
self.app.memcache.store = {}
- resp = controller.PUT(req)
+ try:
+ resp = controller.PUT(req)
+ except HTTPException as resp:
+ pass
self.assertEquals(resp.status_int, 413)
def test_basic_COPY(self):
@@ -2429,6 +4303,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ def test_basic_COPY_account(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': 'c1/o2',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_COPY_across_containers(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2451,6 +4338,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ def test_COPY_account_source_with_slashes_in_name(self):
+ req = Request.blank('/v1/a/c/o/o2',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': 'c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_COPY_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2462,6 +4362,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ def test_COPY_account_destination_leading_slash(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_COPY_source_with_slashes_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2473,14 +4386,35 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ def test_COPY_account_source_with_slashes_destination_leading_slash(self):
+ req = Request.blank('/v1/a/c/o/o2',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_COPY_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o'})
status_list = [] # no requests needed
with self.controller_context(req, *status_list) as controller:
- resp = controller.COPY(req)
- self.assertEquals(resp.status_int, 412)
+ self.assertRaises(HTTPException, controller.COPY, req)
+
+ def test_COPY_account_no_object_in_destination(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': 'c_o',
+ 'Destination-Account': 'a1'})
+ status_list = [] # no requests needed
+ with self.controller_context(req, *status_list) as controller:
+ self.assertRaises(HTTPException, controller.COPY, req)
def test_COPY_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o',
@@ -2492,6 +4426,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.COPY(req)
self.assertEquals(resp.status_int, 503)
+ def test_COPY_account_server_error_reading_source(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 503, 503, 503)
+ # acct cont acct cont objc objc objc
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 503)
+
def test_COPY_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2502,6 +4447,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.COPY(req)
self.assertEquals(resp.status_int, 404)
+ def test_COPY_account_not_found_reading_source(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 404, 404, 404)
+ # acct cont acct cont objc objc objc
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 404)
+
def test_COPY_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2512,6 +4468,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.COPY(req)
self.assertEquals(resp.status_int, 201)
+ def test_COPY_account_with_some_missing_sources(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+
def test_COPY_with_metadata(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2527,6 +4494,23 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEquals(resp.headers.get('x-delete-at'), '9876543210')
+ def test_COPY_account_with_metadata(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'X-Object-Meta-Ours': 'okay',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers.get('x-object-meta-test'),
+ 'testing')
+ self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay')
+ self.assertEquals(resp.headers.get('x-delete-at'), '9876543210')
+
+ @_limit_max_file_size
def test_COPY_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2535,7 +4519,7 @@ class TestObjectController(unittest.TestCase):
class LargeResponseBody(object):
def __len__(self):
- return MAX_FILE_SIZE + 1
+ return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
@@ -2546,12 +4530,43 @@ class TestObjectController(unittest.TestCase):
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
- resp = controller.COPY(req)
+ try:
+ resp = controller.COPY(req)
+ except HTTPException as resp:
+ pass
+ self.assertEquals(resp.status_int, 413)
+
+ @_limit_max_file_size
+ def test_COPY_account_source_larger_than_max_file_size(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+
+ class LargeResponseBody(object):
+
+ def __len__(self):
+ return constraints.MAX_FILE_SIZE + 1
+
+ def __getitem__(self, key):
+ return ''
+
+ copy_from_obj_body = LargeResponseBody()
+ status_list = (200, 200, 200, 200, 200)
+ # acct cont objc objc objc
+ kwargs = dict(body=copy_from_obj_body)
+ with self.controller_context(req, *status_list,
+ **kwargs) as controller:
+ try:
+ resp = controller.COPY(req)
+ except HTTPException as resp:
+ pass
self.assertEquals(resp.status_int, 413)
def test_COPY_newest(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
@@ -2567,30 +4582,86 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.headers['x-copied-from-last-modified'],
'3')
+ def test_COPY_account_newest(self):
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ req.account = 'a'
+ controller.object_name = 'o'
+ set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
+                             # acct cont acct cont objc objc objc obj obj obj
+ timestamps=('1', '1', '1', '1', '3', '2', '1',
+ '4', '4', '4'))
+ self.app.memcache.store = {}
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from-last-modified'],
+ '3')
+
def test_COPY_delete_at(self):
with save_globals():
- given_headers = {}
+ backend_requests = []
- def fake_connect_put_node(nodes, part, path, headers,
- logger_thread_locals):
- given_headers.update(headers)
+ def capture_requests(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ backend_requests.append((method, path, headers))
- controller = proxy_server.ObjectController(self.app, 'a',
- 'c', 'o')
- controller._connect_put_node = fake_connect_put_node
- set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
+ set_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
+ give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
self.app.update_request(req)
- controller.COPY(req)
- self.assertEquals(given_headers.get('X-Delete-At'), '9876543210')
- self.assertTrue('X-Delete-At-Host' in given_headers)
- self.assertTrue('X-Delete-At-Device' in given_headers)
- self.assertTrue('X-Delete-At-Partition' in given_headers)
- self.assertTrue('X-Delete-At-Container' in given_headers)
+ resp = controller.COPY(req)
+ self.assertEqual(201, resp.status_int) # sanity
+ for method, path, given_headers in backend_requests:
+ if method != 'PUT':
+ continue
+ self.assertEquals(given_headers.get('X-Delete-At'),
+ '9876543210')
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
+
+ def test_COPY_account_delete_at(self):
+ with save_globals():
+ backend_requests = []
+
+ def capture_requests(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ backend_requests.append((method, path, headers))
+
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
+ set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
+ give_connect=capture_requests)
+ self.app.memcache.store = {}
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+
+ self.app.update_request(req)
+ resp = controller.COPY(req)
+ self.assertEqual(201, resp.status_int) # sanity
+ for method, path, given_headers in backend_requests:
+ if method != 'PUT':
+ continue
+ self.assertEquals(given_headers.get('X-Delete-At'),
+ '9876543210')
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
def test_chunked_put(self):
@@ -2615,8 +4686,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
set_http_connect(201, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Transfer-Encoding': 'chunked',
@@ -2637,17 +4708,16 @@ class TestObjectController(unittest.TestCase):
req.body_file = ChunkedFile(11)
self.app.memcache.store = {}
self.app.update_request(req)
- try:
- swift.proxy.controllers.obj.MAX_FILE_SIZE = 10
+
+ with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
res = controller.PUT(req)
self.assertEquals(res.status_int, 413)
- finally:
- swift.proxy.controllers.obj.MAX_FILE_SIZE = MAX_FILE_SIZE
+ @unpatch_policies
def test_chunked_put_bad_version(self):
# Check bad version
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v0 HTTP/1.1\r\nHost: localhost\r\n'
@@ -2657,10 +4727,11 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 412'
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_chunked_put_bad_path(self):
# Check bad path
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET invalid HTTP/1.1\r\nHost: localhost\r\n'
@@ -2670,10 +4741,11 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 404'
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_chunked_put_bad_utf8(self):
# Check invalid utf-8
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a%80 HTTP/1.1\r\nHost: localhost\r\n'
@@ -2684,10 +4756,11 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 412'
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_chunked_put_bad_path_no_controller(self):
# Check bad path, no controller
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1 HTTP/1.1\r\nHost: localhost\r\n'
@@ -2698,10 +4771,11 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 412'
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_chunked_put_bad_method(self):
# Check bad method
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('LICK /v1/a HTTP/1.1\r\nHost: localhost\r\n'
@@ -2712,12 +4786,13 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 405'
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_chunked_put_unhandled_exception(self):
# Check unhandled exception
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
- obj2srv) = _test_servers
+ obj2srv, obj3srv) = _test_servers
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
orig_update_request = prosrv.update_request
def broken_update_request(*args, **kwargs):
@@ -2735,12 +4810,13 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(headers[:len(exp)], exp)
prosrv.update_request = orig_update_request
+ @unpatch_policies
def test_chunked_put_head_account(self):
# Head account, just a double check and really is here to test
# the part Application.log_request that 'enforces' a
# content_length on the response.
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
@@ -2752,6 +4828,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(headers[:len(exp)], exp)
self.assert_('\r\nContent-Length: 0\r\n' in headers)
+ @unpatch_policies
def test_chunked_put_utf8_all_the_way_down(self):
# Test UTF-8 Unicode all the way through the system
ustr = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xba \xe1\xbc\xb0\xce' \
@@ -2763,7 +4840,7 @@ class TestObjectController(unittest.TestCase):
ustr_short = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xbatest'
# Create ustr container
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
@@ -2795,7 +4872,7 @@ class TestObjectController(unittest.TestCase):
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEquals(headers[:len(exp)], exp)
- listing = simplejson.loads(fd.read())
+ listing = json.loads(fd.read())
self.assert_(ustr.decode('utf8') in [l['name'] for l in listing])
# List account with ustr container (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -2843,7 +4920,7 @@ class TestObjectController(unittest.TestCase):
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEquals(headers[:len(exp)], exp)
- listing = simplejson.loads(fd.read())
+ listing = json.loads(fd.read())
self.assertEquals(listing[0]['name'], ustr.decode('utf8'))
# List ustr container with ustr object (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -2871,10 +4948,11 @@ class TestObjectController(unittest.TestCase):
self.assert_('\r\nX-Object-Meta-%s: %s\r\n' %
(quote(ustr_short).lower(), quote(ustr)) in headers)
+ @unpatch_policies
def test_chunked_put_chunked_put(self):
# Do chunked object put
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# Also happens to assert that x-storage-token is taken as a
@@ -2900,11 +4978,12 @@ class TestObjectController(unittest.TestCase):
body = fd.read()
self.assertEquals(body, 'oh hai123456789abcdef')
+ @unpatch_policies
def test_version_manifest(self, oc='versions', vc='vers', o='name'):
versions_to_create = 3
# Create a container for our versioned object testing
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
pre = quote('%03x' % len(o))
@@ -3108,18 +5187,22 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 404'
self.assertEquals(headers[:len(exp)], exp)
- # make sure manifest files don't get versioned
- sock = connect_tcp(('localhost', prolis.getsockname()[1]))
- fd = sock.makefile()
- fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
- 'localhost\r\nConnection: close\r\nX-Storage-Token: '
- 't\r\nContent-Length: 0\r\nContent-Type: text/jibberish0\r\n'
- 'Foo: barbaz\r\nX-Object-Manifest: %s/foo_\r\n\r\n'
- % (oc, vc, o))
- fd.flush()
- headers = readuntil2crlfs(fd)
- exp = 'HTTP/1.1 201'
- self.assertEquals(headers[:len(exp)], exp)
+ # make sure dlo manifest files don't get versioned
+ for _junk in xrange(1, versions_to_create):
+ sleep(.01) # guarantee that the timestamp changes
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
+ 'localhost\r\nConnection: close\r\nX-Storage-Token: '
+ 't\r\nContent-Length: 0\r\n'
+ 'Content-Type: text/jibberish0\r\n'
+ 'Foo: barbaz\r\nX-Object-Manifest: %s/%s/\r\n\r\n'
+ % (oc, o, oc, o))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEquals(headers[:len(exp)], exp)
+
# Ensure we have no saved versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
@@ -3233,51 +5316,59 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 2' # 2xx response
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_version_manifest_utf8(self):
oc = '0_oc_non_ascii\xc2\xa3'
vc = '0_vc_non_ascii\xc2\xa3'
o = '0_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_container(self):
oc = '1_oc_non_ascii\xc2\xa3'
vc = '1_vc_ascii'
o = '1_o_ascii'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_version_container(self):
oc = '2_oc_ascii'
vc = '2_vc_non_ascii\xc2\xa3'
o = '2_o_ascii'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_containers(self):
oc = '3_oc_non_ascii\xc2\xa3'
vc = '3_vc_non_ascii\xc2\xa3'
o = '3_o_ascii'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_object(self):
oc = '4_oc_ascii'
vc = '4_vc_ascii'
o = '4_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_version_container_utf_object(self):
oc = '5_oc_ascii'
vc = '5_vc_non_ascii\xc2\xa3'
o = '5_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_container_utf_object(self):
oc = '6_oc_non_ascii\xc2\xa3'
vc = '6_vc_ascii'
o = '6_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_conditional_range_get(self):
- (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis) = \
- _test_sockets
+ (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis,
+ obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# make a container
@@ -3325,8 +5416,8 @@ class TestObjectController(unittest.TestCase):
def test_mismatched_etags(self):
with save_globals():
# no etag supplied, object servers return success w/ diff values
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
@@ -3357,8 +5448,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.GET(req)
self.assert_('accept-ranges' in resp.headers)
@@ -3369,8 +5460,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assert_('accept-ranges' in resp.headers)
@@ -3384,8 +5475,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
@@ -3400,8 +5491,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
@@ -3417,8 +5508,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
self.app.object_post_as_copy = False
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
@@ -3435,8 +5526,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
@@ -3453,8 +5544,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
@@ -3470,8 +5561,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o'})
@@ -3482,9 +5573,10 @@ class TestObjectController(unittest.TestCase):
def test_POST_converts_delete_after_to_delete_at(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
+ self.app.object_post_as_copy = False
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
+ set_http_connect(200, 200, 202, 202, 202)
self.app.memcache.store = {}
orig_time = time.time
try:
@@ -3498,194 +5590,133 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(res.status, '202 Fake')
self.assertEquals(req.headers.get('x-delete-at'),
str(int(t + 60)))
-
- self.app.object_post_as_copy = False
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container',
- 'object')
- set_http_connect(200, 200, 202, 202, 202)
- self.app.memcache.store = {}
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-After': '60'})
- self.app.update_request(req)
- res = controller.POST(req)
- self.assertEquals(res.status, '202 Fake')
- self.assertEquals(req.headers.get('x-delete-at'),
- str(int(t + 60)))
finally:
time.time = orig_time
- def test_POST_non_int_delete_after(self):
- with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
- self.app.memcache.store = {}
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-After': '60.1'})
- self.app.update_request(req)
- res = controller.POST(req)
- self.assertEquals(res.status, '400 Bad Request')
- self.assertTrue('Non-integer X-Delete-After' in res.body)
-
- def test_POST_negative_delete_after(self):
- with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
- self.app.memcache.store = {}
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-After': '-60'})
- self.app.update_request(req)
- res = controller.POST(req)
- self.assertEquals(res.status, '400 Bad Request')
- self.assertTrue('X-Delete-At in past' in res.body)
-
- def test_POST_delete_at(self):
- with save_globals():
- given_headers = {}
-
- def fake_make_requests(req, ring, part, method, path, headers,
- query_string=''):
- given_headers.update(headers[0])
-
- self.app.object_post_as_copy = False
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- controller.make_requests = fake_make_requests
- set_http_connect(200, 200)
- self.app.memcache.store = {}
- t = str(int(time.time() + 100))
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- self.app.update_request(req)
- controller.POST(req)
- self.assertEquals(given_headers.get('X-Delete-At'), t)
- self.assertTrue('X-Delete-At-Host' in given_headers)
- self.assertTrue('X-Delete-At-Device' in given_headers)
- self.assertTrue('X-Delete-At-Partition' in given_headers)
- self.assertTrue('X-Delete-At-Container' in given_headers)
-
- t = str(int(time.time() + 100)) + '.1'
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- self.app.update_request(req)
- resp = controller.POST(req)
- self.assertEquals(resp.status_int, 400)
- self.assertTrue('Non-integer X-Delete-At' in resp.body)
+ @patch_policies([
+ StoragePolicy(0, 'zero', False, object_ring=FakeRing()),
+ StoragePolicy(1, 'one', True, object_ring=FakeRing())
+ ])
+ def test_PUT_versioning_with_nonzero_default_policy(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
- t = str(int(time.time() - 100))
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
+ def test_connect(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ if method == "HEAD":
+ self.assertEquals(path, '/a/c/o.jpg')
+ self.assertNotEquals(None,
+ headers['X-Backend-Storage-Policy-Index'])
+ self.assertEquals(1, int(headers
+ ['X-Backend-Storage-Policy-Index']))
+
+ def fake_container_info(account, container, req):
+ return {'status': 200, 'sync_key': None, 'storage_policy': '1',
+ 'meta': {}, 'cors': {'allow_origin': None,
+ 'expose_headers': None,
+ 'max_age': None},
+ 'sysmeta': {}, 'read_acl': None, 'object_count': None,
+ 'write_acl': None, 'versions': 'c-versions',
+ 'partition': 1, 'bytes': None,
+ 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0,
+ 'id': 0, 'device': 'sda', 'port': 1000},
+ {'zone': 1, 'ip': '10.0.0.1', 'region': 1,
+ 'id': 1, 'device': 'sdb', 'port': 1001},
+ {'zone': 2, 'ip': '10.0.0.2', 'region': 0,
+ 'id': 2, 'device': 'sdc', 'port': 1002}]}
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
+
+ controller.container_info = fake_container_info
+ set_http_connect(200, 200, 200, # head: for the last version
+ 200, 200, 200, # get: for the last version
+ 201, 201, 201, # put: move the current version
+ 201, 201, 201, # put: save the new version
+ give_connect=test_connect)
+ req = Request.blank('/v1/a/c/o.jpg',
+ environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0'})
self.app.update_request(req)
- resp = controller.POST(req)
- self.assertEquals(resp.status_int, 400)
- self.assertTrue('X-Delete-At in past' in resp.body)
-
- def test_PUT_converts_delete_after_to_delete_at(self):
- with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 201, 201, 201)
- self.app.memcache.store = {}
- orig_time = time.time
- try:
- t = time.time()
- time.time = lambda: t
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-After': '60'})
- self.app.update_request(req)
- res = controller.PUT(req)
- self.assertEquals(res.status, '201 Fake')
- self.assertEquals(req.headers.get('x-delete-at'),
- str(int(t + 60)))
- finally:
- time.time = orig_time
-
- def test_PUT_non_int_delete_after(self):
- with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 201, 201, 201)
self.app.memcache.store = {}
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-After': '60.1'})
- self.app.update_request(req)
res = controller.PUT(req)
- self.assertEquals(res.status, '400 Bad Request')
- self.assertTrue('Non-integer X-Delete-After' in res.body)
-
- def test_PUT_negative_delete_after(self):
- with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 201, 201, 201)
- self.app.memcache.store = {}
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-After': '-60'})
+ self.assertEquals(201, res.status_int)
+
+ @patch_policies([
+ StoragePolicy(0, 'zero', False, object_ring=FakeRing()),
+ StoragePolicy(1, 'one', True, object_ring=FakeRing())
+ ])
+ def test_cross_policy_DELETE_versioning(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
+ requests = []
+
+ def capture_requests(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ requests.append((method, path, headers))
+
+ def fake_container_info(app, env, account, container, **kwargs):
+ info = {'status': 200, 'sync_key': None, 'storage_policy': None,
+ 'meta': {}, 'cors': {'allow_origin': None,
+ 'expose_headers': None,
+ 'max_age': None},
+ 'sysmeta': {}, 'read_acl': None, 'object_count': None,
+ 'write_acl': None, 'versions': None,
+ 'partition': 1, 'bytes': None,
+ 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0,
+ 'id': 0, 'device': 'sda', 'port': 1000},
+ {'zone': 1, 'ip': '10.0.0.1', 'region': 1,
+ 'id': 1, 'device': 'sdb', 'port': 1001},
+ {'zone': 2, 'ip': '10.0.0.2', 'region': 0,
+ 'id': 2, 'device': 'sdc', 'port': 1002}]}
+ if container == 'c':
+ info['storage_policy'] = '1'
+ info['versions'] = 'c-versions'
+ elif container == 'c-versions':
+ info['storage_policy'] = '0'
+ else:
+ self.fail('Unexpected call to get_info for %r' % container)
+ return info
+ container_listing = json.dumps([{'name': 'old_version'}])
+ with save_globals():
+ resp_status = (
+ 200, 200, # listings for versions container
+ 200, 200, 200, # get: for the last version
+ 201, 201, 201, # put: move the last version
+ 200, 200, 200, # delete: for the last version
+ )
+ body_iter = iter([container_listing] + [
+ '' for x in range(len(resp_status) - 1)])
+ set_http_connect(*resp_status, body_iter=body_iter,
+ give_connect=capture_requests)
+ req = Request.blank('/v1/a/c/current_version', method='DELETE')
self.app.update_request(req)
- res = controller.PUT(req)
- self.assertEquals(res.status, '400 Bad Request')
- self.assertTrue('X-Delete-At in past' in res.body)
-
- def test_PUT_delete_at(self):
- with save_globals():
- given_headers = {}
-
- def fake_connect_put_node(nodes, part, path, headers,
- logger_thread_locals):
- given_headers.update(headers)
-
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- controller._connect_put_node = fake_connect_put_node
- set_http_connect(200, 200)
self.app.memcache.store = {}
- t = str(int(time.time() + 100))
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- self.app.update_request(req)
- controller.PUT(req)
- self.assertEquals(given_headers.get('X-Delete-At'), t)
- self.assertTrue('X-Delete-At-Host' in given_headers)
- self.assertTrue('X-Delete-At-Device' in given_headers)
- self.assertTrue('X-Delete-At-Partition' in given_headers)
- self.assertTrue('X-Delete-At-Container' in given_headers)
-
- t = str(int(time.time() + 100)) + '.1'
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- self.app.update_request(req)
- resp = controller.PUT(req)
- self.assertEquals(resp.status_int, 400)
- self.assertTrue('Non-integer X-Delete-At' in resp.body)
-
- t = str(int(time.time() - 100))
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- self.app.update_request(req)
- resp = controller.PUT(req)
- self.assertEquals(resp.status_int, 400)
- self.assertTrue('X-Delete-At in past' in resp.body)
-
+ with mock.patch('swift.proxy.controllers.base.get_info',
+ fake_container_info):
+ resp = self.app.handle_request(req)
+ self.assertEquals(200, resp.status_int)
+ expected = [('GET', '/a/c-versions')] * 2 + \
+ [('GET', '/a/c-versions/old_version')] * 3 + \
+ [('PUT', '/a/c/current_version')] * 3 + \
+ [('DELETE', '/a/c-versions/old_version')] * 3
+ self.assertEqual(expected, [(m, p) for m, p, h in requests])
+ for method, path, headers in requests:
+ if 'current_version' in path:
+ expected_storage_policy = 1
+ elif 'old_version' in path:
+ expected_storage_policy = 0
+ else:
+ continue
+ storage_policy_index = \
+ int(headers['X-Backend-Storage-Policy-Index'])
+ self.assertEqual(
+ expected_storage_policy, storage_policy_index,
+ 'Unexpected %s request for %s '
+ 'with storage policy index %s' % (
+ method, path, storage_policy_index))
+
+ @unpatch_policies
def test_leak_1(self):
_request_instances = weakref.WeakKeyDictionary()
_orig_init = Request.__init__
@@ -3747,8 +5778,8 @@ class TestObjectController(unittest.TestCase):
def test_OPTIONS(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a',
- 'c', 'o.jpg')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
def my_empty_container_info(*args):
return {}
@@ -3855,7 +5886,8 @@ class TestObjectController(unittest.TestCase):
def test_CORS_valid(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
@@ -3908,7 +5940,8 @@ class TestObjectController(unittest.TestCase):
def test_CORS_valid_with_obj_headers(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
@@ -3969,20 +6002,21 @@ class TestObjectController(unittest.TestCase):
def test_PUT_x_container_headers_with_equal_replicas(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_PUT_x_container_headers_with_fewer_container_replicas(self):
@@ -3990,7 +6024,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
@@ -3998,10 +6033,10 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': None,
'X-Container-Partition': None,
@@ -4012,7 +6047,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
@@ -4020,13 +6056,13 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_POST_x_container_headers_with_more_container_replicas(self):
@@ -4036,7 +6072,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/stuff'})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.POST, req,
200, 200, 200, 200, 200) # HEAD HEAD POST POST POST
@@ -4044,13 +6081,13 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_DELETE_x_container_headers_with_more_container_replicas(self):
@@ -4059,20 +6096,21 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'application/stuff'})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.DELETE, req,
200, 200, 200, 200, 200) # HEAD HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}
])
@@ -4081,15 +6119,15 @@ class TestObjectController(unittest.TestCase):
self.app.container_ring.set_replicas(2)
delete_at_timestamp = int(time.time()) + 100000
- delete_at_container = str(
- delete_at_timestamp /
- self.app.expiring_objects_container_divisor *
- self.app.expiring_objects_container_divisor)
+ delete_at_container = utils.get_expirer_container(
+ delete_at_timestamp, self.app.expiring_objects_container_divisor,
+ 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': '0',
'X-Delete-At': str(delete_at_timestamp)})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
@@ -4099,11 +6137,11 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000',
'X-Delete-At-Container': delete_at_container,
- 'X-Delete-At-Partition': '1',
+ 'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
- 'X-Delete-At-Partition': '1',
+ 'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': None,
'X-Delete-At-Container': None,
@@ -4118,15 +6156,15 @@ class TestObjectController(unittest.TestCase):
self.app.expiring_objects_container_divisor = 60
delete_at_timestamp = int(time.time()) + 100000
- delete_at_container = str(
- delete_at_timestamp /
- self.app.expiring_objects_container_divisor *
- self.app.expiring_objects_container_divisor)
+ delete_at_container = utils.get_expirer_container(
+ delete_at_timestamp, self.app.expiring_objects_container_divisor,
+ 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': 0,
'X-Delete-At': str(delete_at_timestamp)})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
@@ -4135,38 +6173,483 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Delete-At-Container': delete_at_container,
- 'X-Delete-At-Partition': '1',
+ 'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda,sdd'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
- 'X-Delete-At-Partition': '1',
+ 'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': '10.0.0.2:1002',
'X-Delete-At-Container': delete_at_container,
- 'X-Delete-At-Partition': '1',
+ 'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdc'}
])
+class TestECMismatchedFA(unittest.TestCase):
+ def tearDown(self):
+ prosrv = _test_servers[0]
+ # don't leak error limits and poison other tests
+ prosrv._error_limiting = {}
+
+ def test_mixing_different_objects_fragment_archives(self):
+ (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
+ obj2srv, obj3srv) = _test_servers
+ ec_policy = POLICIES[3]
+
+ @public
+ def bad_disk(req):
+ return Response(status=507, body="borken")
+
+ ensure_container = Request.blank(
+ "/v1/a/ec-crazytown",
+ environ={"REQUEST_METHOD": "PUT"},
+ headers={"X-Storage-Policy": "ec", "X-Auth-Token": "t"})
+ resp = ensure_container.get_response(prosrv)
+ self.assertTrue(resp.status_int in (201, 202))
+
+ obj1 = "first version..."
+ put_req1 = Request.blank(
+ "/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "PUT"},
+ headers={"X-Auth-Token": "t"})
+ put_req1.body = obj1
+
+        obj2 = u"versión segundo".encode("utf-8")
+ put_req2 = Request.blank(
+ "/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "PUT"},
+ headers={"X-Auth-Token": "t"})
+ put_req2.body = obj2
+
+ # pyeclib has checks for unequal-length; we don't want to trip those
+ self.assertEqual(len(obj1), len(obj2))
+
+ # Servers obj1 and obj2 will have the first version of the object
+ prosrv._error_limiting = {}
+ with nested(
+ mock.patch.object(obj3srv, 'PUT', bad_disk),
+ mock.patch(
+ 'swift.common.storage_policy.ECStoragePolicy.quorum')):
+ type(ec_policy).quorum = mock.PropertyMock(return_value=2)
+ resp = put_req1.get_response(prosrv)
+ self.assertEqual(resp.status_int, 201)
+
+ # Server obj3 (and, in real life, some handoffs) will have the
+ # second version of the object.
+ prosrv._error_limiting = {}
+ with nested(
+ mock.patch.object(obj1srv, 'PUT', bad_disk),
+ mock.patch.object(obj2srv, 'PUT', bad_disk),
+ mock.patch(
+ 'swift.common.storage_policy.ECStoragePolicy.quorum'),
+ mock.patch(
+ 'swift.proxy.controllers.base.Controller._quorum_size',
+ lambda *a, **kw: 1)):
+ type(ec_policy).quorum = mock.PropertyMock(return_value=1)
+ resp = put_req2.get_response(prosrv)
+ self.assertEqual(resp.status_int, 201)
+
+ # A GET that only sees 1 fragment archive should fail
+ get_req = Request.blank("/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "GET"},
+ headers={"X-Auth-Token": "t"})
+ prosrv._error_limiting = {}
+ with nested(
+ mock.patch.object(obj1srv, 'GET', bad_disk),
+ mock.patch.object(obj2srv, 'GET', bad_disk)):
+ resp = get_req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 503)
+
+ # A GET that sees 2 matching FAs will work
+ get_req = Request.blank("/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "GET"},
+ headers={"X-Auth-Token": "t"})
+ prosrv._error_limiting = {}
+ with mock.patch.object(obj3srv, 'GET', bad_disk):
+ resp = get_req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.body, obj1)
+
+ # A GET that sees 2 mismatching FAs will fail
+ get_req = Request.blank("/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "GET"},
+ headers={"X-Auth-Token": "t"})
+ prosrv._error_limiting = {}
+ with mock.patch.object(obj2srv, 'GET', bad_disk):
+ resp = get_req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 503)
+
+
+class TestObjectECRangedGET(unittest.TestCase):
+ def setUp(self):
+ self.app = proxy_server.Application(
+ None, FakeMemcache(),
+ logger=debug_logger('proxy-ut'),
+ account_ring=FakeRing(),
+ container_ring=FakeRing())
+
+ @classmethod
+ def setUpClass(cls):
+ cls.obj_name = 'range-get-test'
+ cls.tiny_obj_name = 'range-get-test-tiny'
+ cls.aligned_obj_name = 'range-get-test-aligned'
+
+ # Note: only works if called with unpatched policies
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: 0\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Storage-Policy: ec\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 2'
+ assert headers[:len(exp)] == exp, "container PUT failed"
+
+ seg_size = POLICIES.get_by_name("ec").ec_segment_size
+ cls.seg_size = seg_size
+ # EC segment size is 4 KiB, hence this gives 4 segments, which we
+ # then verify with a quick sanity check
+ cls.obj = ' my hovercraft is full of eels '.join(
+ str(s) for s in range(431))
+ assert seg_size * 4 > len(cls.obj) > seg_size * 3, \
+ "object is wrong number of segments"
+
+ cls.tiny_obj = 'tiny, tiny object'
+ assert len(cls.tiny_obj) < seg_size, "tiny_obj too large"
+
+ cls.aligned_obj = "".join(
+ "abcdEFGHijkl%04d" % x for x in range(512))
+ assert len(cls.aligned_obj) % seg_size == 0, "aligned obj not aligned"
+
+ for obj_name, obj in ((cls.obj_name, cls.obj),
+ (cls.tiny_obj_name, cls.tiny_obj),
+ (cls.aligned_obj_name, cls.aligned_obj)):
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/%s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (obj_name, len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ assert headers[:len(exp)] == exp, \
+ "object PUT failed %s" % obj_name
+
+ def _get_obj(self, range_value, obj_name=None):
+ if obj_name is None:
+ obj_name = self.obj_name
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/%s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Range: %s\r\n'
+ '\r\n' % (obj_name, range_value))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ # e.g. "HTTP/1.1 206 Partial Content\r\n..."
+ status_code = int(headers[9:12])
+ headers = parse_headers_string(headers)
+
+ gotten_obj = ''
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+
+ return (status_code, headers, gotten_obj)
+
+ def test_unaligned(self):
+ # One segment's worth of data, but straddling two segment boundaries
+ # (so it has data from three segments)
+ status, headers, gotten_obj = self._get_obj("bytes=3783-7878")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "4096")
+ self.assertEqual(headers['Content-Range'], "bytes 3783-7878/14513")
+ self.assertEqual(len(gotten_obj), 4096)
+ self.assertEqual(gotten_obj, self.obj[3783:7879])
+
+ def test_aligned_left(self):
+ # First byte is aligned to a segment boundary, last byte is not
+ status, headers, gotten_obj = self._get_obj("bytes=0-5500")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "5501")
+ self.assertEqual(headers['Content-Range'], "bytes 0-5500/14513")
+ self.assertEqual(len(gotten_obj), 5501)
+ self.assertEqual(gotten_obj, self.obj[:5501])
+
+ def test_aligned_range(self):
+ # Ranged GET that wants exactly one segment
+ status, headers, gotten_obj = self._get_obj("bytes=4096-8191")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "4096")
+ self.assertEqual(headers['Content-Range'], "bytes 4096-8191/14513")
+ self.assertEqual(len(gotten_obj), 4096)
+ self.assertEqual(gotten_obj, self.obj[4096:8192])
+
+ def test_aligned_range_end(self):
+ # Ranged GET that wants exactly the last segment
+ status, headers, gotten_obj = self._get_obj("bytes=12288-14512")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "2225")
+ self.assertEqual(headers['Content-Range'], "bytes 12288-14512/14513")
+ self.assertEqual(len(gotten_obj), 2225)
+ self.assertEqual(gotten_obj, self.obj[12288:])
+
+ def test_aligned_range_aligned_obj(self):
+ # Ranged GET that wants exactly the last segment, which is full-size
+ status, headers, gotten_obj = self._get_obj("bytes=4096-8191",
+ self.aligned_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "4096")
+ self.assertEqual(headers['Content-Range'], "bytes 4096-8191/8192")
+ self.assertEqual(len(gotten_obj), 4096)
+ self.assertEqual(gotten_obj, self.aligned_obj[4096:8192])
+
+ def test_byte_0(self):
+ # Just the first byte, but it's index 0, so that's easy to get wrong
+ status, headers, gotten_obj = self._get_obj("bytes=0-0")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "1")
+ self.assertEqual(headers['Content-Range'], "bytes 0-0/14513")
+ self.assertEqual(gotten_obj, self.obj[0])
+
+ def test_unsatisfiable(self):
+ # Goes just one byte too far off the end of the object, so it's
+ # unsatisfiable
+ status, _junk, _junk = self._get_obj(
+ "bytes=%d-%d" % (len(self.obj), len(self.obj) + 100))
+ self.assertEqual(status, 416)
+
+ def test_off_end(self):
+ # Ranged GET that's mostly off the end of the object, but overlaps
+ # it in just the last byte
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) + 100))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '1')
+ self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[-1])
+
+ def test_aligned_off_end(self):
+ # Ranged GET that starts on a segment boundary but asks for a whole lot
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=%d-%d" % (8192, len(self.obj) + 100))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '6321')
+ self.assertEqual(headers['Content-Range'], 'bytes 8192-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[8192:])
+
+ def test_way_off_end(self):
+ # Ranged GET that's mostly off the end of the object, but overlaps
+ # it in just the last byte, and wants multiple segments' worth off
+ # the end
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) * 1000))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '1')
+ self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[-1])
+
+ def test_boundaries(self):
+ # Wants the last byte of segment 1 + the first byte of segment 2
+ status, headers, gotten_obj = self._get_obj("bytes=4095-4096")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '2')
+ self.assertEqual(headers['Content-Range'], 'bytes 4095-4096/14513')
+ self.assertEqual(gotten_obj, self.obj[4095:4097])
+
+ def test_until_end(self):
+ # Wants the last byte of segment 1 + the rest
+ status, headers, gotten_obj = self._get_obj("bytes=4095-")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '10418')
+ self.assertEqual(headers['Content-Range'], 'bytes 4095-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[4095:])
+
+ def test_small_suffix(self):
+ # Small range-suffix GET: the last 100 bytes (less than one segment)
+ status, headers, gotten_obj = self._get_obj("bytes=-100")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '100')
+ self.assertEqual(headers['Content-Range'], 'bytes 14413-14512/14513')
+ self.assertEqual(len(gotten_obj), 100)
+ self.assertEqual(gotten_obj, self.obj[-100:])
+
+ def test_small_suffix_aligned(self):
+ # Small range-suffix GET: the last 100 bytes, last segment is
+ # full-size
+ status, headers, gotten_obj = self._get_obj("bytes=-100",
+ self.aligned_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '100')
+ self.assertEqual(headers['Content-Range'], 'bytes 8092-8191/8192')
+ self.assertEqual(len(gotten_obj), 100)
+
+ def test_suffix_two_segs(self):
+ # Ask for enough data that we need the last two segments. The last
+ # segment is short, though, so this ensures we compensate for that.
+ #
+ # Note that the total range size is less than one full-size segment.
+ suffix_len = len(self.obj) % self.seg_size + 1
+
+ status, headers, gotten_obj = self._get_obj("bytes=-%d" % suffix_len)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], str(suffix_len))
+ self.assertEqual(headers['Content-Range'],
+ 'bytes %d-%d/%d' % (len(self.obj) - suffix_len,
+ len(self.obj) - 1,
+ len(self.obj)))
+ self.assertEqual(len(gotten_obj), suffix_len)
+
+ def test_large_suffix(self):
+ # Large range-suffix GET: the last 5000 bytes (more than one segment)
+ status, headers, gotten_obj = self._get_obj("bytes=-5000")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '5000')
+ self.assertEqual(headers['Content-Range'], 'bytes 9513-14512/14513')
+ self.assertEqual(len(gotten_obj), 5000)
+ self.assertEqual(gotten_obj, self.obj[-5000:])
+
+ def test_overlarge_suffix(self):
+ # The last N+1 bytes of an N-byte object
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=-%d" % (len(self.obj) + 1))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '14513')
+ self.assertEqual(headers['Content-Range'], 'bytes 0-14512/14513')
+ self.assertEqual(len(gotten_obj), len(self.obj))
+ self.assertEqual(gotten_obj, self.obj)
+
+ def test_small_suffix_tiny_object(self):
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=-5", self.tiny_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '5')
+ self.assertEqual(headers['Content-Range'], 'bytes 12-16/17')
+ self.assertEqual(gotten_obj, self.tiny_obj[12:])
+
+ def test_overlarge_suffix_tiny_object(self):
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=-1234567890", self.tiny_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '17')
+ self.assertEqual(headers['Content-Range'], 'bytes 0-16/17')
+ self.assertEqual(len(gotten_obj), len(self.tiny_obj))
+ self.assertEqual(gotten_obj, self.tiny_obj)
+
+
+@patch_policies([
+ StoragePolicy(0, 'zero', True, object_ring=FakeRing(base_port=3000)),
+ StoragePolicy(1, 'one', False, object_ring=FakeRing(base_port=3000)),
+ StoragePolicy(2, 'two', False, True, object_ring=FakeRing(base_port=3000))
+])
class TestContainerController(unittest.TestCase):
"Test swift.proxy_server.ContainerController"
def setUp(self):
- self.app = proxy_server.Application(None, FakeMemcache(),
- account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing(),
- logger=FakeLogger())
+ self.app = proxy_server.Application(
+ None, FakeMemcache(),
+ account_ring=FakeRing(),
+ container_ring=FakeRing(base_port=2000),
+ logger=debug_logger())
+
+ def test_convert_policy_to_index(self):
+ controller = swift.proxy.controllers.ContainerController(self.app,
+ 'a', 'c')
+ expected = {
+ 'zero': 0,
+ 'ZeRo': 0,
+ 'one': 1,
+ 'OnE': 1,
+ }
+ for name, index in expected.items():
+ req = Request.blank('/a/c', headers={'Content-Length': '0',
+ 'Content-Type': 'text/plain',
+ 'X-Storage-Policy': name})
+ self.assertEqual(controller._convert_policy_to_index(req), index)
+ # default test
+ req = Request.blank('/a/c', headers={'Content-Length': '0',
+ 'Content-Type': 'text/plain'})
+ self.assertEqual(controller._convert_policy_to_index(req), None)
+ # negative test
+ req = Request.blank('/a/c', headers={'Content-Length': '0',
+ 'Content-Type': 'text/plain',
+ 'X-Storage-Policy': 'nada'})
+ self.assertRaises(HTTPException, controller._convert_policy_to_index,
+ req)
+ # storage policy two is deprecated
+ req = Request.blank('/a/c', headers={'Content-Length': '0',
+ 'Content-Type': 'text/plain',
+ 'X-Storage-Policy': 'two'})
+ self.assertRaises(HTTPException, controller._convert_policy_to_index,
+ req)
+
+ def test_convert_index_to_name(self):
+ policy = random.choice(list(POLICIES))
+ req = Request.blank('/v1/a/c')
+ with mocked_http_conn(
+ 200, 200,
+ headers={'X-Backend-Storage-Policy-Index': int(policy)},
+ ) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.headers['X-Storage-Policy'], policy.name)
+
+ def test_no_convert_index_to_name_when_container_not_found(self):
+ policy = random.choice(list(POLICIES))
+ req = Request.blank('/v1/a/c')
+ with mocked_http_conn(
+ 200, 404, 404, 404,
+ headers={'X-Backend-Storage-Policy-Index':
+ int(policy)}) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ self.assertEqual(resp.status_int, 404)
+ self.assertEqual(resp.headers['X-Storage-Policy'], None)
+
+ def test_error_convert_index_to_name(self):
+ req = Request.blank('/v1/a/c')
+ with mocked_http_conn(
+ 200, 200,
+ headers={'X-Backend-Storage-Policy-Index': '-1'}) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.headers['X-Storage-Policy'], None)
+ error_lines = self.app.logger.get_lines_for_level('error')
+ self.assertEqual(2, len(error_lines))
+ for msg in error_lines:
+ expected = "Could not translate " \
+ "X-Backend-Storage-Policy-Index ('-1')"
+ self.assertTrue(expected in msg)
def test_transfer_headers(self):
src_headers = {'x-remove-versions-location': 'x',
- 'x-container-read': '*:user'}
+ 'x-container-read': '*:user',
+ 'x-remove-container-sync-key': 'x'}
dst_headers = {'x-versions-location': 'backup'}
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-versions-location': '',
- 'x-container-read': '*:user'}
+ 'x-container-read': '*:user',
+ 'x-container-sync-key': ''}
self.assertEqual(dst_headers, expected_headers)
def assert_status_map(self, method, statuses, expected,
@@ -4249,22 +6732,78 @@ class TestContainerController(unittest.TestCase):
# return 200 and cache 200 for and container
test_status_map((200, 200, 404, 404), 200, 200, 200)
test_status_map((200, 200, 500, 404), 200, 200, 200)
- # return 304 dont cache container
+ # return 304 don't cache container
test_status_map((200, 304, 500, 404), 304, None, 200)
# return 404 and cache 404 for container
test_status_map((200, 404, 404, 404), 404, 404, 200)
test_status_map((200, 404, 404, 500), 404, 404, 200)
- # return 503, dont cache container
+ # return 503, don't cache container
test_status_map((200, 500, 500, 500), 503, None, 200)
self.assertFalse(self.app.account_autocreate)
# In all the following tests cache 404 for account
- # return 404 (as account is not found) and dont cache container
+ # return 404 (as account is not found) and don't cache container
test_status_map((404, 404, 404), 404, None, 404)
# This should make no difference
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, None, 404)
+ def test_PUT_policy_headers(self):
+ backend_requests = []
+
+ def capture_requests(ipaddr, port, device, partition, method,
+ path, headers=None, query_string=None):
+ if method == 'PUT':
+ backend_requests.append(headers)
+
+ def test_policy(requested_policy):
+ with save_globals():
+ mock_conn = set_http_connect(200, 201, 201, 201,
+ give_connect=capture_requests)
+ self.app.memcache.store = {}
+ req = Request.blank('/v1/a/test', method='PUT',
+ headers={'Content-Length': 0})
+ if requested_policy:
+ expected_policy = requested_policy
+ req.headers['X-Storage-Policy'] = policy.name
+ else:
+ expected_policy = POLICIES.default
+ res = req.get_response(self.app)
+ if expected_policy.is_deprecated:
+ self.assertEquals(res.status_int, 400)
+ self.assertEqual(0, len(backend_requests))
+ expected = 'is deprecated'
+ self.assertTrue(expected in res.body,
+ '%r did not include %r' % (
+ res.body, expected))
+ return
+ self.assertEquals(res.status_int, 201)
+ self.assertEqual(
+ expected_policy.object_ring.replicas,
+ len(backend_requests))
+ for headers in backend_requests:
+ if not requested_policy:
+ self.assertFalse('X-Backend-Storage-Policy-Index' in
+ headers)
+ self.assertTrue(
+ 'X-Backend-Storage-Policy-Default' in headers)
+ self.assertEqual(
+ int(expected_policy),
+ int(headers['X-Backend-Storage-Policy-Default']))
+ else:
+ self.assertTrue('X-Backend-Storage-Policy-Index' in
+ headers)
+ self.assertEqual(int(headers
+ ['X-Backend-Storage-Policy-Index']),
+ int(policy))
+ # make sure all mocked responses are consumed
+ self.assertRaises(StopIteration, mock_conn.code_iter.next)
+
+ test_policy(None) # no policy header
+ for policy in POLICIES:
+ backend_requests = [] # reset backend requests
+ test_policy(policy)
+
def test_PUT(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
@@ -4323,6 +6862,47 @@ class TestContainerController(unittest.TestCase):
503, 201, 201), # put container success
201, missing_container=True)
+ def test_PUT_autocreate_account_with_sysmeta(self):
+ # x-account-sysmeta headers in a container PUT request should be
+ # transferred to the account autocreate PUT request
+ with save_globals():
+ controller = proxy_server.ContainerController(self.app, 'account',
+ 'container')
+
+ def test_status_map(statuses, expected, headers=None, **kwargs):
+ set_http_connect(*statuses, **kwargs)
+ self.app.memcache.store = {}
+ req = Request.blank('/v1/a/c', {}, headers=headers)
+ req.content_length = 0
+ self.app.update_request(req)
+ res = controller.PUT(req)
+ expected = str(expected)
+ self.assertEquals(res.status[:len(expected)], expected)
+
+ self.app.account_autocreate = True
+ calls = []
+ callback = _make_callback_func(calls)
+ key, value = 'X-Account-Sysmeta-Blah', 'something'
+ headers = {key: value}
+
+ # all goes according to plan
+ test_status_map(
+ (404, 404, 404, # account_info fails on 404
+ 201, 201, 201, # PUT account
+ 200, # account_info success
+ 201, 201, 201), # put container success
+ 201, missing_container=True,
+ headers=headers,
+ give_connect=callback)
+
+ self.assertEqual(10, len(calls))
+ for call in calls[3:6]:
+ self.assertEqual('/account', call['path'])
+ self.assertTrue(key in call['headers'],
+ '%s call, key %s missing in headers %s' %
+ (call['method'], key, call['headers']))
+ self.assertEqual(value, call['headers'][key])
+
def test_POST(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
@@ -4359,7 +6939,14 @@ class TestContainerController(unittest.TestCase):
self.app.max_containers_per_account = 12345
controller = proxy_server.ContainerController(self.app, 'account',
'container')
- self.assert_status_map(controller.PUT, (201, 201, 201), 403,
+ self.assert_status_map(controller.PUT,
+ (200, 200, 201, 201, 201), 201,
+ missing_container=True)
+
+ controller = proxy_server.ContainerController(self.app, 'account',
+ 'container_new')
+
+ self.assert_status_map(controller.PUT, (200, 404, 404, 404), 403,
missing_container=True)
self.app.max_containers_per_account = 12345
@@ -4372,7 +6959,7 @@ class TestContainerController(unittest.TestCase):
def test_PUT_max_container_name_length(self):
with save_globals():
- limit = MAX_CONTAINER_NAME_LENGTH
+ limit = constraints.MAX_CONTAINER_NAME_LENGTH
controller = proxy_server.ContainerController(self.app, 'account',
'1' * limit)
self.assert_status_map(controller.PUT,
@@ -4398,9 +6985,7 @@ class TestContainerController(unittest.TestCase):
for meth in ('DELETE', 'PUT'):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
- for dev in self.app.account_ring.devs.values():
- del dev['errors']
- del dev['last_error']
+ self.app._error_limiting = {}
controller = proxy_server.ContainerController(self.app,
'account',
'container')
@@ -4437,9 +7022,10 @@ class TestContainerController(unittest.TestCase):
resp = getattr(controller, meth)(req)
self.assertEquals(resp.status_int, 404)
- for dev in self.app.account_ring.devs.values():
- dev['errors'] = self.app.error_suppression_limit + 1
- dev['last_error'] = time.time()
+ for dev in self.app.account_ring.devs:
+ set_node_errors(self.app, dev,
+ self.app.error_suppression_limit + 1,
+ time.time())
set_http_connect(200, 200, 200, 200, 200, 200)
# Make sure it is a blank request wthout env caching
req = Request.blank('/v1/a/c',
@@ -4477,19 +7063,26 @@ class TestContainerController(unittest.TestCase):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
+ container_ring = controller.app.container_ring
controller.app.sort_nodes = lambda l: l
self.assert_status_map(controller.HEAD, (200, 503, 200, 200), 200,
missing_container=False)
+
self.assertEquals(
- controller.app.container_ring.devs[0]['errors'], 2)
- self.assert_('last_error' in controller.app.container_ring.devs[0])
+ node_error_count(controller.app, container_ring.devs[0]), 2)
+ self.assert_(
+ node_last_error(controller.app, container_ring.devs[0])
+ is not None)
for _junk in xrange(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD,
(200, 503, 503, 503), 503)
- self.assertEquals(controller.app.container_ring.devs[0]['errors'],
- self.app.error_suppression_limit + 1)
+ self.assertEquals(
+ node_error_count(controller.app, container_ring.devs[0]),
+ self.app.error_suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 503)
- self.assert_('last_error' in controller.app.container_ring.devs[0])
+ self.assert_(
+ node_last_error(controller.app, container_ring.devs[0])
+ is not None)
self.assert_status_map(controller.PUT, (200, 201, 201, 201), 503,
missing_container=True)
self.assert_status_map(controller.DELETE,
@@ -4606,14 +7199,15 @@ class TestContainerController(unittest.TestCase):
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
- ('a' * MAX_META_NAME_LENGTH): 'v'})
+ ('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
- req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
- headers={'X-Container-Meta-' +
- ('a' * (MAX_META_NAME_LENGTH + 1)): 'v'})
+ req = Request.blank(
+ '/v1/a/c', environ={'REQUEST_METHOD': method},
+ headers={'X-Container-Meta-' +
+ ('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 400)
@@ -4621,21 +7215,21 @@ class TestContainerController(unittest.TestCase):
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
- 'a' * MAX_META_VALUE_LENGTH})
+ 'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
- 'a' * (MAX_META_VALUE_LENGTH + 1)})
+ 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
- for x in xrange(MAX_META_COUNT):
+ for x in xrange(constraints.MAX_META_COUNT):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
@@ -4644,7 +7238,7 @@ class TestContainerController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
- for x in xrange(MAX_META_COUNT + 1):
+ for x in xrange(constraints.MAX_META_COUNT + 1):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
@@ -4654,16 +7248,17 @@ class TestContainerController(unittest.TestCase):
set_http_connect(201, 201, 201)
headers = {}
- header_value = 'a' * MAX_META_VALUE_LENGTH
+ header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
- while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
- size += 4 + MAX_META_VALUE_LENGTH
+ while size < (constraints.MAX_META_OVERALL_SIZE - 4
+ - constraints.MAX_META_VALUE_LENGTH):
+ size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
- if MAX_META_OVERALL_SIZE - size > 1:
+ if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Container-Meta-a'] = \
- 'a' * (MAX_META_OVERALL_SIZE - size - 1)
+ 'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
@@ -4671,7 +7266,7 @@ class TestContainerController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Container-Meta-a'] = \
- 'a' * (MAX_META_OVERALL_SIZE - size)
+ 'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
@@ -4781,6 +7376,78 @@ class TestContainerController(unittest.TestCase):
controller.HEAD(req)
self.assert_(called[0])
+ def test_unauthorized_requests_when_account_not_found(self):
+ # verify unauthorized container requests always return response
+ # from swift.authorize
+ called = [0, 0]
+
+ def authorize(req):
+ called[0] += 1
+ return HTTPUnauthorized(request=req)
+
+ def account_info(*args):
+ called[1] += 1
+ return None, None, None
+
+ def _do_test(method):
+ with save_globals():
+ swift.proxy.controllers.Controller.account_info = account_info
+ app = proxy_server.Application(None, FakeMemcache(),
+ account_ring=FakeRing(),
+ container_ring=FakeRing())
+ set_http_connect(201, 201, 201)
+ req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
+ req.environ['swift.authorize'] = authorize
+ self.app.update_request(req)
+ res = app.handle_request(req)
+ return res
+
+ for method in ('PUT', 'POST', 'DELETE'):
+ # no delay_denial on method, expect one call to authorize
+ called = [0, 0]
+ res = _do_test(method)
+ self.assertEqual(401, res.status_int)
+ self.assertEqual([1, 0], called)
+
+ for method in ('HEAD', 'GET'):
+ # delay_denial on method, expect two calls to authorize
+ called = [0, 0]
+ res = _do_test(method)
+ self.assertEqual(401, res.status_int)
+ self.assertEqual([2, 1], called)
+
+ def test_authorized_requests_when_account_not_found(self):
+ # verify authorized container requests always return 404 when
+ # account not found
+ called = [0, 0]
+
+ def authorize(req):
+ called[0] += 1
+
+ def account_info(*args):
+ called[1] += 1
+ return None, None, None
+
+ def _do_test(method):
+ with save_globals():
+ swift.proxy.controllers.Controller.account_info = account_info
+ app = proxy_server.Application(None, FakeMemcache(),
+ account_ring=FakeRing(),
+ container_ring=FakeRing())
+ set_http_connect(201, 201, 201)
+ req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
+ req.environ['swift.authorize'] = authorize
+ self.app.update_request(req)
+ res = app.handle_request(req)
+ return res
+
+ for method in ('PUT', 'POST', 'DELETE', 'HEAD', 'GET'):
+ # expect one call to authorize
+ called = [0, 0]
+ res = _do_test(method)
+ self.assertEqual(404, res.status_int)
+ self.assertEqual([1, 1], called)
+
def test_OPTIONS_get_info_drops_origin(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
@@ -5008,10 +7675,10 @@ class TestContainerController(unittest.TestCase):
200, 201, 201, 201) # HEAD PUT PUT PUT
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
@@ -5028,13 +7695,13 @@ class TestContainerController(unittest.TestCase):
200, 201, 201, 201) # HEAD PUT PUT PUT
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
@@ -5048,10 +7715,10 @@ class TestContainerController(unittest.TestCase):
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
@@ -5068,13 +7735,13 @@ class TestContainerController(unittest.TestCase):
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
@@ -5087,7 +7754,7 @@ class TestContainerController(unittest.TestCase):
req = Request.blank('/v1/a/c', method='PUT', headers={'': ''})
with save_globals():
- new_connect = set_http_connect(200, # account existance check
+ new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
@@ -5096,7 +7763,7 @@ class TestContainerController(unittest.TestCase):
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
- timestamps.pop(0) # account existance check
+ timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
@@ -5112,7 +7779,7 @@ class TestContainerController(unittest.TestCase):
req = Request.blank('/v1/a/c', method='DELETE', headers={'': ''})
self.app.update_request(req)
with save_globals():
- new_connect = set_http_connect(200, # account existance check
+ new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
@@ -5121,7 +7788,7 @@ class TestContainerController(unittest.TestCase):
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
- timestamps.pop(0) # account existance check
+ timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
@@ -5141,18 +7808,20 @@ class TestContainerController(unittest.TestCase):
self.assert_(got_exc)
+@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing)
+ container_ring=FakeRing())
- def assert_status_map(self, method, statuses, expected, env_expected=None):
+ def assert_status_map(self, method, statuses, expected, env_expected=None,
+ headers=None, **kwargs):
+ headers = headers or {}
with save_globals():
- set_http_connect(*statuses)
- req = Request.blank('/v1/a', {})
+ set_http_connect(*statuses, **kwargs)
+ req = Request.blank('/v1/a', {}, headers=headers)
self.app.update_request(req)
res = method(req)
self.assertEquals(res.status_int, expected)
@@ -5311,9 +7980,36 @@ class TestAccountController(unittest.TestCase):
controller.POST,
(404, 404, 404, 403, 403, 403, 400, 400, 400), 400)
+ def test_POST_autocreate_with_sysmeta(self):
+ with save_globals():
+ controller = proxy_server.AccountController(self.app, 'account')
+ self.app.memcache = FakeMemcacheReturnsNone()
+ # first test with autocreate being False
+ self.assertFalse(self.app.account_autocreate)
+ self.assert_status_map(controller.POST,
+ (404, 404, 404), 404)
+        # next turn it on and test account being created then updated
+ controller.app.account_autocreate = True
+ calls = []
+ callback = _make_callback_func(calls)
+ key, value = 'X-Account-Sysmeta-Blah', 'something'
+ headers = {key: value}
+ self.assert_status_map(
+ controller.POST,
+ (404, 404, 404, 202, 202, 202, 201, 201, 201), 201,
+            # POST, autocreate PUT, POST again
+ headers=headers,
+ give_connect=callback)
+ self.assertEqual(9, len(calls))
+ for call in calls:
+ self.assertTrue(key in call['headers'],
+ '%s call, key %s missing in headers %s' %
+ (call['method'], key, call['headers']))
+ self.assertEqual(value, call['headers'][key])
+
def test_connection_refused(self):
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1 # can't connect on this port
controller = proxy_server.AccountController(self.app, 'account')
@@ -5324,7 +8020,7 @@ class TestAccountController(unittest.TestCase):
def test_other_socket_error(self):
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = -1 # invalid port number
controller = proxy_server.AccountController(self.app, 'account')
@@ -5377,7 +8073,7 @@ class TestAccountController(unittest.TestCase):
def test_PUT_max_account_name_length(self):
with save_globals():
self.app.allow_account_management = True
- limit = MAX_ACCOUNT_NAME_LENGTH
+ limit = constraints.MAX_ACCOUNT_NAME_LENGTH
controller = proxy_server.AccountController(self.app, '1' * limit)
self.assert_status_map(controller.PUT, (201, 201, 201), 201)
controller = proxy_server.AccountController(
@@ -5392,6 +8088,12 @@ class TestAccountController(unittest.TestCase):
self.assert_status_map(controller.PUT, (201, -1, -1), 503)
self.assert_status_map(controller.PUT, (503, 503, -1), 503)
+ def test_PUT_status(self):
+ with save_globals():
+ self.app.allow_account_management = True
+ controller = proxy_server.AccountController(self.app, 'account')
+ self.assert_status_map(controller.PUT, (201, 201, 202), 202)
+
def test_PUT_metadata(self):
self.metadata_helper('PUT')
@@ -5452,14 +8154,15 @@ class TestAccountController(unittest.TestCase):
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
- ('a' * MAX_META_NAME_LENGTH): 'v'})
+ ('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
- req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
- headers={'X-Account-Meta-' +
- ('a' * (MAX_META_NAME_LENGTH + 1)): 'v'})
+ req = Request.blank(
+ '/v1/a/c', environ={'REQUEST_METHOD': method},
+ headers={'X-Account-Meta-' +
+ ('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 400)
@@ -5467,21 +8170,21 @@ class TestAccountController(unittest.TestCase):
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
- 'a' * MAX_META_VALUE_LENGTH})
+ 'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
- 'a' * (MAX_META_VALUE_LENGTH + 1)})
+ 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
- for x in xrange(MAX_META_COUNT):
+ for x in xrange(constraints.MAX_META_COUNT):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
@@ -5490,7 +8193,7 @@ class TestAccountController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
- for x in xrange(MAX_META_COUNT + 1):
+ for x in xrange(constraints.MAX_META_COUNT + 1):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
@@ -5500,16 +8203,17 @@ class TestAccountController(unittest.TestCase):
set_http_connect(201, 201, 201)
headers = {}
- header_value = 'a' * MAX_META_VALUE_LENGTH
+ header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
- while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
- size += 4 + MAX_META_VALUE_LENGTH
+ while size < (constraints.MAX_META_OVERALL_SIZE - 4
+ - constraints.MAX_META_VALUE_LENGTH):
+ size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Account-Meta-%04d' % x] = header_value
x += 1
- if MAX_META_OVERALL_SIZE - size > 1:
+ if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Account-Meta-a'] = \
- 'a' * (MAX_META_OVERALL_SIZE - size - 1)
+ 'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
@@ -5517,7 +8221,7 @@ class TestAccountController(unittest.TestCas