From 513988915aa1af13a989d062b021fe1562cbf18d Mon Sep 17 00:00:00 2001
From: venkata edara
Date: Wed, 10 May 2017 13:27:38 +0530
Subject: Rebase to Swift 2.10.1 (newton)

Change-Id: I53a962c9a301089c8aed0b43c50f944c30225944
Signed-off-by: venkata edara
Reviewed-on: https://review.gluster.org/16653
Reviewed-by: Prashanth Pai
Tested-by: Prashanth Pai
---
 test/unit/__init__.py | 296 ++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 224 insertions(+), 72 deletions(-)

(limited to 'test/unit/__init__.py')

diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index 372fb58..ee2a262 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -15,10 +15,12 @@
 
 """ Swift tests """
 
+from __future__ import print_function
 import os
 import copy
 import logging
 import errno
+from six.moves import range
 import sys
 from contextlib import contextmanager, closing
 from collections import defaultdict, Iterable
@@ -30,20 +32,28 @@ import eventlet
 from eventlet.green import socket
 from tempfile import mkdtemp
 from shutil import rmtree
-from swift.common.utils import Timestamp
+import signal
+import json
+
+
+from swift.common.utils import Timestamp, NOTICE
 from test import get_config
-from swift.common import swob, utils
+from swift.common import utils
+from swift.common.header_key_dict import HeaderKeyDict
 from swift.common.ring import Ring, RingData
 from hashlib import md5
 import logging.handlers
-from httplib import HTTPException
+
+from six.moves.http_client import HTTPException
 from swift.common import storage_policy
-from swift.common.storage_policy import StoragePolicy, ECStoragePolicy
+from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
+                                         VALID_EC_TYPES)
 import functools
-import cPickle as pickle
+import six.moves.cPickle as pickle
 from gzip import GzipFile
 import mock as mocklib
 import inspect
+from nose import SkipTest
 
 EMPTY_ETAG = md5().hexdigest()
 
@@ -53,6 +63,22 @@ if not os.path.basename(sys.argv[0]).startswith('swift'):
     utils.HASH_PATH_SUFFIX = 'endcap'
 
 
+EC_TYPE_PREFERENCE = [
+    'liberasurecode_rs_vand',
+    'jerasure_rs_vand',
+]
+for eclib_name in EC_TYPE_PREFERENCE:
+    if eclib_name in VALID_EC_TYPES:
+        break
+else:
+    raise SystemExit('ERROR: unable to find suitable PyECLib type'
+                     ' (none of %r found in %r)' % (
+                         EC_TYPE_PREFERENCE,
+                         VALID_EC_TYPES,
+                     ))
+DEFAULT_TEST_EC_TYPE = eclib_name
+
+
 def patch_policies(thing_or_policies=None, legacy_only=False,
                    with_ec_default=False, fake_ring_args=None):
     if isinstance(thing_or_policies, (
@@ -67,7 +93,7 @@ def patch_policies(thing_or_policies=None, legacy_only=False,
     elif with_ec_default:
         default_policies = [
             ECStoragePolicy(0, name='ec', is_default=True,
-                            ec_type='jerasure_rs_vand', ec_ndata=10,
+                            ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10,
                             ec_nparity=4, ec_segment_size=4096),
             StoragePolicy(1, name='unu'),
         ]
@@ -183,13 +209,6 @@ class FakeRing(Ring):
 
     def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
                  base_port=1000):
-        """
-        :param part_power: make part calculation based on the path
-
-        If you set a part_power when you setup your FakeRing the parts you get
-        out of ring methods will actually be based on the path - otherwise we
-        exercise the real ring code, but ignore the result and return 1.
- """ self._base_port = base_port self.max_more_nodes = max_more_nodes self._part_shift = 32 - part_power @@ -207,7 +226,8 @@ class FakeRing(Ring): for x in range(self.replicas): ip = '10.0.0.%s' % x port = self._base_port + x - self._devs.append({ + # round trip through json to ensure unicode like real rings + self._devs.append(json.loads(json.dumps({ 'ip': ip, 'replication_ip': ip, 'port': port, @@ -216,7 +236,7 @@ class FakeRing(Ring): 'zone': x % 3, 'region': x % 2, 'id': x, - }) + }))) @property def replica_count(self): @@ -226,9 +246,7 @@ class FakeRing(Ring): return [dict(node, index=i) for i, node in enumerate(list(self._devs))] def get_more_nodes(self, part): - # replicas^2 is the true cap - for x in xrange(self.replicas, min(self.replicas + self.max_more_nodes, - self.replicas * self.replicas)): + for x in range(self.replicas, (self.replicas + self.max_more_nodes)): yield {'ip': '10.0.0.%s' % x, 'replication_ip': '10.0.0.%s' % x, 'port': self._base_port + x, @@ -244,9 +262,9 @@ def write_fake_ring(path, *devs): Pretty much just a two node, two replica, 2 part power ring... """ dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1', - 'port': 6000} + 'port': 6200} dev2 = {'id': 0, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1', - 'port': 6000} + 'port': 6200} dev1_updates, dev2_updates = devs or ({}, {}) @@ -266,7 +284,7 @@ class FabricatedRing(Ring): your tests needs. """ - def __init__(self, replicas=6, devices=8, nodes=4, port=6000, + def __init__(self, replicas=6, devices=8, nodes=4, port=6200, part_power=4): self.devices = devices self.nodes = nodes @@ -459,6 +477,12 @@ class UnmockTimeModule(object): logging.time = UnmockTimeModule() +class WARN_DEPRECATED(Exception): + def __init__(self, msg): + self.msg = msg + print(self.msg) + + class FakeLogger(logging.Logger, object): # a thread safe fake logger @@ -478,8 +502,21 @@ class FakeLogger(logging.Logger, object): logging.INFO: 'info', logging.DEBUG: 'debug', logging.CRITICAL: 'critical', + NOTICE: 'notice', } + def warn(self, *args, **kwargs): + raise WARN_DEPRECATED("Deprecated Method warn use warning instead") + + def notice(self, msg, *args, **kwargs): + """ + Convenience function for syslog priority LOG_NOTICE. The python + logging lvl is set to 25, just above info. SysLogHandler is + monkey patched to map this log lvl to the LOG_NOTICE syslog + priority. 
+ """ + self.log(NOTICE, msg, *args, **kwargs) + def _log(self, level, msg, *args, **kwargs): store_name = self.store_in[level] cargs = [msg] @@ -495,7 +532,9 @@ class FakeLogger(logging.Logger, object): def _clear(self): self.log_dict = defaultdict(list) self.lines_dict = {'critical': [], 'error': [], 'info': [], - 'warning': [], 'debug': []} + 'warning': [], 'debug': [], 'notice': []} + + clear = _clear # this is a public interface def get_lines_for_level(self, level): if level not in self.lines_dict: @@ -560,8 +599,8 @@ class FakeLogger(logging.Logger, object): try: line = record.getMessage() except TypeError: - print 'WARNING: unable to format log message %r %% %r' % ( - record.msg, record.args) + print('WARNING: unable to format log message %r %% %r' % ( + record.msg, record.args)) raise self.lines_dict[record.levelname.lower()].append(line) @@ -575,17 +614,24 @@ class FakeLogger(logging.Logger, object): pass +class DebugSwiftLogFormatter(utils.SwiftLogFormatter): + + def format(self, record): + msg = super(DebugSwiftLogFormatter, self).format(record) + return msg.replace('#012', '\n') + + class DebugLogger(FakeLogger): """A simple stdout logging version of FakeLogger""" def __init__(self, *args, **kwargs): FakeLogger.__init__(self, *args, **kwargs) - self.formatter = logging.Formatter( + self.formatter = DebugSwiftLogFormatter( "%(server)s %(levelname)s: %(message)s") def handle(self, record): self._handle(record) - print self.formatter.format(record) + print(self.formatter.format(record)) class DebugLogAdapter(utils.LogAdapter): @@ -704,6 +750,74 @@ def mock(update): delattr(module, attr) +class FakeStatus(object): + """ + This will work with our fake_http_connect, if you hand in one of these + instead of a status int or status int tuple to the "codes" iter you can + add some eventlet sleep to the expect and response stages of the + connection. + """ + + def __init__(self, status, expect_sleep=None, response_sleep=None): + """ + :param status: the response status int, or a tuple of + ([expect_status, ...], response_status) + :param expect_sleep: float, time to eventlet sleep during expect, can + be a iter of floats + :param response_sleep: float, time to eventlet sleep during response + """ + # connect exception + if isinstance(status, (Exception, eventlet.Timeout)): + raise status + if isinstance(status, tuple): + self.expect_status = list(status[:-1]) + self.status = status[-1] + self.explicit_expect_list = True + else: + self.expect_status, self.status = ([], status) + self.explicit_expect_list = False + if not self.expect_status: + # when a swift backend service returns a status before reading + # from the body (mostly an error response) eventlet.wsgi will + # respond with that status line immediately instead of 100 + # Continue, even if the client sent the Expect 100 header. + # BufferedHttp and the proxy both see these error statuses + # when they call getexpect, so our FakeConn tries to act like + # our backend services and return certain types of responses + # as expect statuses just like a real backend server would do. 
+ if self.status in (507, 412, 409): + self.expect_status = [status] + else: + self.expect_status = [100, 100] + + # setup sleep attributes + if not isinstance(expect_sleep, (list, tuple)): + expect_sleep = [expect_sleep] * len(self.expect_status) + self.expect_sleep_list = list(expect_sleep) + while len(self.expect_sleep_list) < len(self.expect_status): + self.expect_sleep_list.append(None) + self.response_sleep = response_sleep + + def get_response_status(self): + if self.response_sleep is not None: + eventlet.sleep(self.response_sleep) + if self.expect_status and self.explicit_expect_list: + raise Exception('Test did not consume all fake ' + 'expect status: %r' % (self.expect_status,)) + if isinstance(self.status, (Exception, eventlet.Timeout)): + raise self.status + return self.status + + def get_expect_status(self): + expect_sleep = self.expect_sleep_list.pop(0) + if expect_sleep is not None: + eventlet.sleep(expect_sleep) + expect_status = self.expect_status.pop(0) + if isinstance(expect_status, (Exception, eventlet.Timeout)): + raise expect_status + return expect_status + + class SlowBody(object): """ This will work with our fake_http_connect, if you hand in these @@ -740,30 +854,10 @@ def fake_http_connect(*code_iter, **kwargs): def __init__(self, status, etag=None, body='', timestamp='1', headers=None, expect_headers=None, connection_id=None, - give_send=None): - # connect exception - if isinstance(status, (Exception, eventlet.Timeout)): - raise status - if isinstance(status, tuple): - self.expect_status = list(status[:-1]) - self.status = status[-1] - self.explicit_expect_list = True - else: - self.expect_status, self.status = ([], status) - self.explicit_expect_list = False - if not self.expect_status: - # when a swift backend service returns a status before reading - # from the body (mostly an error response) eventlet.wsgi will - # respond with that status line immediately instead of 100 - # Continue, even if the client sent the Expect 100 header. - # BufferedHttp and the proxy both see these error statuses - # when they call getexpect, so our FakeConn tries to act like - # our backend services and return certain types of responses - # as expect statuses just like a real backend server would do. 
- if self.status in (507, 412, 409): - self.expect_status = [status] - else: - self.expect_status = [100, 100] + give_send=None, give_expect=None): + if not isinstance(status, FakeStatus): + status = FakeStatus(status) + self._status = status self.reason = 'Fake' self.host = '1.2.3.4' self.port = '1234' @@ -776,6 +870,8 @@ def fake_http_connect(*code_iter, **kwargs): self.timestamp = timestamp self.connection_id = connection_id self.give_send = give_send + self.give_expect = give_expect + self.closed = False if 'slow' in kwargs and isinstance(kwargs['slow'], list): try: self._next_sleep = kwargs['slow'].pop(0) @@ -785,11 +881,6 @@ def fake_http_connect(*code_iter, **kwargs): eventlet.sleep() def getresponse(self): - if self.expect_status and self.explicit_expect_list: - raise Exception('Test did not consume all fake ' - 'expect status: %r' % (self.expect_status,)) - if isinstance(self.status, (Exception, eventlet.Timeout)): - raise self.status exc = kwargs.get('raise_exc') if exc: if isinstance(exc, (Exception, eventlet.Timeout)): @@ -797,16 +888,21 @@ def fake_http_connect(*code_iter, **kwargs): raise Exception('test') if kwargs.get('raise_timeout_exc'): raise eventlet.Timeout() + self.status = self._status.get_response_status() return self def getexpect(self): - expect_status = self.expect_status.pop(0) - if isinstance(self.expect_status, (Exception, eventlet.Timeout)): - raise self.expect_status + if self.give_expect: + self.give_expect(self) + expect_status = self._status.get_expect_status() headers = dict(self.expect_headers) if expect_status == 409: headers['X-Backend-Timestamp'] = self.timestamp - return FakeConn(expect_status, headers=headers) + response = FakeConn(expect_status, + timestamp=self.timestamp, + headers=headers) + response.status = expect_status + return response def getheaders(self): etag = self.etag @@ -816,7 +912,7 @@ def fake_http_connect(*code_iter, **kwargs): else: etag = '"68b329da9893e34099c7d8ad5cb9c940"' - headers = swob.HeaderKeyDict({ + headers = HeaderKeyDict({ 'content-length': len(self.body), 'content-type': 'x-application/test', 'x-timestamp': self.timestamp, @@ -834,7 +930,7 @@ def fake_http_connect(*code_iter, **kwargs): # when timestamp is None, HeaderKeyDict raises KeyError headers.pop('x-timestamp', None) try: - if container_ts_iter.next() is False: + if next(container_ts_iter) is False: headers['x-container-timestamp'] = '1' except StopIteration: pass @@ -865,9 +961,9 @@ def fake_http_connect(*code_iter, **kwargs): self.body = self.body[amt:] return rv - def send(self, amt=None): + def send(self, data=None): if self.give_send: - self.give_send(self.connection_id, amt) + self.give_send(self, data) am_slow, value = self.get_slow() if am_slow: if self.received < 4: @@ -875,10 +971,10 @@ def fake_http_connect(*code_iter, **kwargs): eventlet.sleep(value) def getheader(self, name, default=None): - return swob.HeaderKeyDict(self.getheaders()).get(name, default) + return HeaderKeyDict(self.getheaders()).get(name, default) def close(self): - pass + self.closed = True timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter)) etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter)) @@ -911,27 +1007,28 @@ def fake_http_connect(*code_iter, **kwargs): kwargs['give_content_type'](args[6]['Content-Type']) else: kwargs['give_content_type']('') - i, status = conn_id_and_code_iter.next() + i, status = next(conn_id_and_code_iter) if 'give_connect' in kwargs: give_conn_fn = kwargs['give_connect'] argspec = inspect.getargspec(give_conn_fn) if 
argspec.keywords or 'connection_id' in argspec.args:
                 ckwargs['connection_id'] = i
             give_conn_fn(*args, **ckwargs)
-        etag = etag_iter.next()
-        headers = headers_iter.next()
-        expect_headers = expect_headers_iter.next()
-        timestamp = timestamps_iter.next()
+        etag = next(etag_iter)
+        headers = next(headers_iter)
+        expect_headers = next(expect_headers_iter)
+        timestamp = next(timestamps_iter)
         if status <= 0:
             raise HTTPException()
         if body_iter is None:
             body = static_body or ''
         else:
-            body = body_iter.next()
+            body = next(body_iter)
         return FakeConn(status, etag, body=body, timestamp=timestamp,
                         headers=headers, expect_headers=expect_headers,
-                        connection_id=i, give_send=kwargs.get('give_send'))
+                        connection_id=i, give_send=kwargs.get('give_send'),
+                        give_expect=kwargs.get('give_expect'))
 
     connect.code_iter = code_iter
 
@@ -966,3 +1063,58 @@ def mocked_http_conn(*args, **kwargs):
 
 def make_timestamp_iter():
     return iter(Timestamp(t) for t in itertools.count(int(time.time())))
+
+
+class Timeout(object):
+    def __init__(self, seconds):
+        self.seconds = seconds
+
+    def __enter__(self):
+        signal.signal(signal.SIGALRM, self._exit)
+        signal.alarm(self.seconds)
+
+    def __exit__(self, type, value, traceback):
+        signal.alarm(0)
+
+    def _exit(self, signum, frame):
+        class TimeoutException(Exception):
+            pass
+        raise TimeoutException
+
+
+def requires_o_tmpfile_support(func):
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        if not utils.o_tmpfile_supported():
+            raise SkipTest('Requires O_TMPFILE support')
+        return func(*args, **kwargs)
+    return wrapper
+
+
+def encode_frag_archive_bodies(policy, body):
+    """
+    Given a stub body produce a list of complete frag_archive bodies as
+    strings in frag_index order.
+
+    :param policy: a StoragePolicy instance, with policy_type EC_POLICY
+    :param body: a string, the body to encode into frag archives
+
+    :returns: list of strings, the complete frag_archive bodies for the given
+              plaintext
+    """
+    segment_size = policy.ec_segment_size
+    # split up the body into buffers
+    chunks = [body[x:x + segment_size]
+              for x in range(0, len(body), segment_size)]
+    # encode the buffers into fragment payloads
+    fragment_payloads = []
+    for chunk in chunks:
+        fragments = policy.pyeclib_driver.encode(chunk)
+        if not fragments:
+            break
+        fragment_payloads.append(fragments)
+
+    # join up the fragment payloads per node
+    ec_archive_bodies = [''.join(frags)
+                         for frags in zip(*fragment_payloads)]
+    return ec_archive_bodies
--
cgit
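For reference, the FakeStatus helper added above is meant to be handed to fake_http_connect in place of a bare status code, as its docstring describes: the expect_sleep/response_sleep knobs let a test stall the expect and response stages of a fake backend connection. A minimal usage sketch follows (illustrative only, not part of the patch; the address, port, device and path values are invented, and the returned connect factory is called directly here rather than being patched over the real http_connect as a test normally would):

    from test.unit import fake_http_connect, FakeStatus

    # two fake backends: the first answers 200 immediately, the second
    # does a short eventlet sleep before returning its final status
    connect = fake_http_connect(200, FakeStatus(200, response_sleep=0.01))

    for node in range(2):
        conn = connect('10.0.0.%d' % node, 6200, 'sda1', 0, 'GET', '/a/c/o')
        resp = conn.getresponse()
        assert resp.status == 200
        resp.read()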