author     Peter Portante <peter.portante@redhat.com>   2012-10-18 15:34:57 -0400
committer  Peter Portante <peter.portante@redhat.com>   2013-04-29 16:35:55 -0400
commit     c61aea1464475923690cb6180cb0c484748de775 (patch)
tree       461094dc01f6c5d94ab778f04d1e1182fb608fcc
parent     a3a03e0ed84433c4801a320a7a81c0baeaf7bc08 (diff)
object-storage: reduce the number of getxattr system calls by one
Initial step towards addressing BZ 865619.

Prior to this patch, reading the metadata for a given object or container
involved at minimum three getxattr system calls for objects that use only one
xattr key/value pair:

  1. (via pyxattr) getxattr() to see if the key exists and get its value
     length, so it can allocate memory for the second call below
  2. (via pyxattr) getxattr() to get the actual value data
  3. (via pyxattr) getxattr() to see if the following key exists

For objects and containers that only have to use one xattr key/value pair,
this patch reduces the number of system calls by one. This can be significant
given that almost every Swift API operation requires a read of the object's
or container's metadata.

This patch is mostly a change to plugins.utils.read_metadata() to try to
unpickle the accumulated metadata as each key/value pair is read, rather than
accumulating all the key/value pairs and unpickling at the end (a condensed
sketch of the resulting read loop follows the diffstat below). Once we get
enough data to form the pickle, we no longer keep trying to get more keys,
even if those keys exist. The extra keys can exist when the size of the
stored metadata shrinks below a key threshold. See
https://bugzilla.redhat.com/show_bug.cgi?id=865619 for more details. See also
https://bugzilla.redhat.com/show_bug.cgi?id=865858.

This patch also adds a unit test infrastructure, and uses it to test, with
full coverage, the read_metadata, write_metadata and clean_metadata
functions. As a result, we found that an infinite loop would occur in
clean_metadata() when an unexpected IOError was raised while trying to remove
a key (this patch contains a fix).

Change-Id: Ia1838c5e73af453b65360c1c525824231aa7c5d4
BUG: 865619
Signed-off-by: Peter Portante <peter.portante@redhat.com>
Reviewed-on: http://review.gluster.org/4109
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
Tested-by: Anand Avati <avati@redhat.com>
-rwxr-xr-x  .unittests                                   |   6
-rw-r--r--  swift/1.4.8/plugins/utils.py                 |  56
-rw-r--r--  swift/1.4.8/test/__init__.py                 |   1
-rw-r--r--  swift/1.4.8/test/unit/__init__.py            |   0
-rw-r--r--  swift/1.4.8/test/unit/plugins/__init__.py    |   0
-rw-r--r--  swift/1.4.8/test/unit/plugins/test_utils.py  | 231
6 files changed, 281 insertions(+), 13 deletions(-)
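
Before the diffs themselves, here is a condensed sketch of the read loop the
commit message describes. This is an illustration only, not code from the
patch: the name read_metadata_sketch is made up for this note, the
'user.swift.metadata' prefix is an assumed stand-in for the plugin's
METADATA_KEY constant, and the logging and clean_metadata() recovery handling
are omitted; see the full diff of swift/1.4.8/plugins/utils.py below for the
real change.

    # Illustration only -- not the patch. Read pickled metadata that may be
    # split across several xattr keys, stopping as soon as the accumulated
    # bytes unpickle successfully. METADATA_KEY's value is assumed here.
    import errno
    import cPickle as pickle
    import xattr                      # pyxattr

    METADATA_KEY = 'user.swift.metadata'

    def read_metadata_sketch(path):
        metadata, metadata_s, key = None, '', 0
        while metadata is None:
            try:
                metadata_s += xattr.get(path, '%s%s' % (METADATA_KEY, key or ''))
            except IOError as err:
                if err.errno == errno.ENODATA:
                    return {}         # no usable metadata stored on this path
                raise                 # anything else may be transient: surface it
            try:
                # One xattr.get() per stored key; small metadata needs just one.
                metadata = pickle.loads(metadata_s)
            except (EOFError, pickle.UnpicklingError):
                key += 1              # pickle incomplete: fetch the next chunk
        return metadata
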
diff --git a/.unittests b/.unittests
new file mode 100755
index 0000000..5be925d
--- /dev/null
+++ b/.unittests
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+cd swift/1.4.8/test/unit
+nosetests --exe --with-coverage --cover-package plugins --cover-erase $@
+rm -f .coverage
+cd -
diff --git a/swift/1.4.8/plugins/utils.py b/swift/1.4.8/plugins/utils.py
index 61112c8..72b2f38 100644
--- a/swift/1.4.8/plugins/utils.py
+++ b/swift/1.4.8/plugins/utils.py
@@ -190,34 +190,63 @@ def _add_timestamp(metadata_i):
 def read_metadata(path):
     """
-    Helper function to read the pickled metadata from a File/Directory .
+    Helper function to read the pickled metadata from a File/Directory.
     :param path: File/Directory to read metadata from.
     :returns: dictionary of metadata
     """
-    metadata = ''
+    metadata = None
+    metadata_s = ''
     key = 0
-    while True:
+    while metadata is None:
         try:
-            metadata += xattr.get(path, '%s%s' % (METADATA_KEY, (key or '')))
+            metadata_s += xattr.get(path, '%s%s' % (METADATA_KEY, (key or '')))
         except IOError as err:
-            if err.errno != errno.ENODATA:
-                logging.exception("xattr.get failed on %s key %s err: %s", path, key, str(err))
-            break
-        key += 1
-    if metadata:
-        return pickle.loads(metadata)
-    else:
-        return {}
+            if err.errno == errno.ENODATA:
+                if key > 0:
+                    # No errors reading the xattr keys, but since we have not
+                    # been able to find enough chunks to get a successful
+                    # unpickle operation, we consider the metadata lost, and
+                    # drop the existing data so that the internal state can be
+                    # recreated.
+                    clean_metadata(path)
+                # Either we could not find any metadata key, or we found some
+                # keys but could not successfully unpickle the data (missing
+                # chunks, perhaps). Either way, just report to the caller that
+                # we have no metadata.
+                metadata = {}
+            else:
+                logging.exception("xattr.get failed on %s key %s err: %s",
+                                  path, key, str(err))
+                # Note that we don't touch the keys on errors fetching the
+                # data since it could be a transient state.
+                raise
+        else:
+            try:
+                # If this key provides all or the remaining part of the pickle
+                # data, we don't need to keep searching for more keys. This
+                # means if we only need to store data in N xattr key/value
+                # pairs, we only need to invoke xattr.get() N times. With
+                # large key sizes we are shooting for N = 1.
+                metadata = pickle.loads(metadata_s)
+                assert isinstance(metadata, dict)
+            except (EOFError, pickle.UnpicklingError):
+                # We are still not able to recognize the data collected so far
+                # as a pickled object. Make sure we loop around to try to get
+                # more from another xattr key.
+                metadata = None
+                key += 1
+    return metadata
 
 def write_metadata(path, metadata):
     """
     Helper function to write pickled metadata for a File/Directory.
     :param path: File/Directory path to write the metadata
-    :param metadata: metadata to write
+    :param metadata: dictionary of metadata to write
     """
+    assert isinstance(metadata, dict)
     metastr = pickle.dumps(metadata, PICKLE_PROTOCOL)
     key = 0
     while metastr:
@@ -237,6 +266,7 @@ def clean_metadata(path):
         except IOError as err:
             if err.errno == errno.ENODATA:
                 break
+            raise
         key += 1
 
 def dir_empty(path):
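
For orientation, here is a hypothetical round trip through the helpers
patched above. It is not part of the patch: the object path is invented, and
running it for real would require a filesystem with extended-attribute
support, which is exactly the dependency the new unit tests below avoid by
monkey-patching the xattr module.

    # Illustration only -- a hypothetical round trip through the patched
    # helpers. The object path is made up; a filesystem with xattr support
    # is assumed.
    from plugins import utils

    path = '/mnt/gluster-object/AUTH_test/cont/obj'   # hypothetical path
    utils.write_metadata(path, {'X-Object-Meta-Color': 'blue'})

    # For metadata that fits in one xattr key/value pair, this is now a
    # single xattr.get() call instead of two.
    assert utils.read_metadata(path) == {'X-Object-Meta-Color': 'blue'}

    utils.clean_metadata(path)        # removes every stored metadata key
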
diff --git a/swift/1.4.8/test/__init__.py b/swift/1.4.8/test/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/swift/1.4.8/test/__init__.py
@@ -0,0 +1 @@
+
diff --git a/swift/1.4.8/test/unit/__init__.py b/swift/1.4.8/test/unit/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/swift/1.4.8/test/unit/__init__.py
diff --git a/swift/1.4.8/test/unit/plugins/__init__.py b/swift/1.4.8/test/unit/plugins/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/swift/1.4.8/test/unit/plugins/__init__.py
diff --git a/swift/1.4.8/test/unit/plugins/test_utils.py b/swift/1.4.8/test/unit/plugins/test_utils.py
new file mode 100644
index 0000000..daecc42
--- /dev/null
+++ b/swift/1.4.8/test/unit/plugins/test_utils.py
@@ -0,0 +1,231 @@
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Tests for plugins.utils """
+
+import unittest
+import errno
+import xattr
+import cPickle as pickle
+from collections import defaultdict
+from plugins import utils
+
+#
+# Somewhat hacky way of emulating the operation of xattr calls. They are made
+# against a dictionary that stores the xattr key/value pairs.
+#
+_xattrs = {}
+_xattr_op_cnt = defaultdict(int)
+_xattr_err = {}
+
+def _xkey(path, key):
+    return "%s:%s" % (path, key)
+
+def _setxattr(path, key, value):
+    _xattr_op_cnt['set'] += 1
+    xkey = _xkey(path, key)
+    if xkey in _xattr_err:
+        e = IOError()
+        e.errno = _xattr_err[xkey]
+        raise e
+    global _xattrs
+    _xattrs[xkey] = value
+
+def _getxattr(path, key):
+    _xattr_op_cnt['get'] += 1
+    xkey = _xkey(path, key)
+    if xkey in _xattr_err:
+        e = IOError()
+        e.errno = _xattr_err[xkey]
+        raise e
+    global _xattrs
+    if xkey in _xattrs:
+        ret_val = _xattrs[xkey]
+    else:
+        e = IOError("Fake IOError")
+        e.errno = errno.ENODATA
+        raise e
+    return ret_val
+
+def _removexattr(path, key):
+    _xattr_op_cnt['remove'] += 1
+    xkey = _xkey(path, key)
+    if xkey in _xattr_err:
+        e = IOError()
+        e.errno = _xattr_err[xkey]
+        raise e
+    global _xattrs
+    if xkey in _xattrs:
+        del _xattrs[xkey]
+    else:
+        e = IOError("Fake IOError")
+        e.errno = errno.ENODATA
+        raise e
+
+def _initxattr():
+    global _xattrs
+    _xattrs = {}
+    global _xattr_op_cnt
+    _xattr_op_cnt = defaultdict(int)
+    global _xattr_err
+    _xattr_err = {}
+
+    # Save the current methods
+    global _xattr_set; _xattr_set = xattr.set
+    global _xattr_get; _xattr_get = xattr.get
+    global _xattr_remove; _xattr_remove = xattr.remove
+
+    # Monkey patch the calls we use with our internal unit test versions
+    xattr.set = _setxattr
+    xattr.get = _getxattr
+    xattr.remove = _removexattr
+
+def _destroyxattr():
+    # Restore the current methods just in case
+    global _xattr_set; xattr.set = _xattr_set
+    global _xattr_get; xattr.get = _xattr_get
+    global _xattr_remove; xattr.remove = _xattr_remove
+    # Destroy the stored values
+    global _xattrs; _xattrs = None
+
+
+class TestUtils(unittest.TestCase):
+    """ Tests for plugins.utils """
+
+    def setUp(self):
+        _initxattr()
+
+    def tearDown(self):
+        _destroyxattr()
+
+    def test_write_metadata(self):
+        path = "/tmp/foo/w"
+        orig_d = { 'bar' : 'foo' }
+        utils.write_metadata(path, orig_d)
+        xkey = _xkey(path, utils.METADATA_KEY)
+        assert len(_xattrs.keys()) == 1
+        assert xkey in _xattrs
+        assert orig_d == pickle.loads(_xattrs[xkey])
+        assert _xattr_op_cnt['set'] == 1
+
+    def test_write_metadata_err(self):
+        path = "/tmp/foo/w"
+        orig_d = { 'bar' : 'foo' }
+        xkey = _xkey(path, utils.METADATA_KEY)
+        _xattr_err[xkey] = errno.EOPNOTSUPP
+        try:
+            utils.write_metadata(path, orig_d)
+        except IOError as e:
+            assert e.errno == errno.EOPNOTSUPP
+            assert len(_xattrs.keys()) == 0
+            assert _xattr_op_cnt['set'] == 1
+        else:
+            self.fail("Expected an IOError exception on write")
+
+    def test_write_metadata_multiple(self):
+        # At 64 KB an xattr key/value pair, this should generate three keys.
+        path = "/tmp/foo/w"
+        orig_d = { 'bar' : 'x' * 150000 }
+        utils.write_metadata(path, orig_d)
+        assert len(_xattrs.keys()) == 3, "Expected 3 keys, found %d" % len(_xattrs.keys())
+        payload = ''
+        for i in range(0,3):
+            xkey = _xkey(path, "%s%s" % (utils.METADATA_KEY, i or ''))
+            assert xkey in _xattrs
+            assert len(_xattrs[xkey]) <= utils.MAX_XATTR_SIZE
+            payload += _xattrs[xkey]
+        assert orig_d == pickle.loads(payload)
+        assert _xattr_op_cnt['set'] == 3, "%r" % _xattr_op_cnt
+
+    def test_clean_metadata(self):
+        path = "/tmp/foo/c"
+        expected_d = { 'a': 'y' * 150000 }
+        expected_p = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
+        for i in range(0,3):
+            xkey = _xkey(path, "%s%s" % (utils.METADATA_KEY, i or ''))
+            _xattrs[xkey] = expected_p[:utils.MAX_XATTR_SIZE]
+            expected_p = expected_p[utils.MAX_XATTR_SIZE:]
+        assert not expected_p
+        utils.clean_metadata(path)
+        assert _xattr_op_cnt['remove'] == 4, "%r" % _xattr_op_cnt
+
+    def test_clean_metadata_err(self):
+        path = "/tmp/foo/c"
+        xkey = _xkey(path, utils.METADATA_KEY)
+        _xattrs[xkey] = pickle.dumps({ 'a': 'y' }, utils.PICKLE_PROTOCOL)
+        _xattr_err[xkey] = errno.EOPNOTSUPP
+        try:
+            utils.clean_metadata(path)
+        except IOError as e:
+            assert e.errno == errno.EOPNOTSUPP
+            assert _xattr_op_cnt['remove'] == 1, "%r" % _xattr_op_cnt
+        else:
+            self.fail("Expected an IOError exception on remove")
+
+    def test_read_metadata(self):
+        path = "/tmp/foo/r"
+        expected_d = { 'a': 'y' }
+        xkey = _xkey(path, utils.METADATA_KEY)
+        _xattrs[xkey] = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
+        res_d = utils.read_metadata(path)
+        assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
+        assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
+
+    def test_read_metadata_notfound(self):
+        path = "/tmp/foo/r"
+        res_d = utils.read_metadata(path)
+        assert res_d == {}
+        assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
+
+    def test_read_metadata_err(self):
+        path = "/tmp/foo/r"
+        expected_d = { 'a': 'y' }
+        xkey = _xkey(path, utils.METADATA_KEY)
+        _xattrs[xkey] = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
+        _xattr_err[xkey] = errno.EOPNOTSUPP
+        try:
+            res_d = utils.read_metadata(path)
+        except IOError as e:
+            assert e.errno == errno.EOPNOTSUPP
+            assert (_xattr_op_cnt['get'] == 1), "%r" % _xattr_op_cnt
+        else:
+            self.fail("Expected an IOError exception on get")
+
+    def test_read_metadata_multiple(self):
+        path = "/tmp/foo/r"
+        expected_d = { 'a': 'y' * 150000 }
+        expected_p = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
+        for i in range(0,3):
+            xkey = _xkey(path, "%s%s" % (utils.METADATA_KEY, i or ''))
+            _xattrs[xkey] = expected_p[:utils.MAX_XATTR_SIZE]
+            expected_p = expected_p[utils.MAX_XATTR_SIZE:]
+        assert not expected_p
+        res_d = utils.read_metadata(path)
+        assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
+        assert _xattr_op_cnt['get'] == 3, "%r" % _xattr_op_cnt
+
+    def test_read_metadata_multiple_one_missing(self):
+        path = "/tmp/foo/r"
+        expected_d = { 'a': 'y' * 150000 }
+        expected_p = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
+        for i in range(0,2):
+            xkey = _xkey(path, "%s%s" % (utils.METADATA_KEY, i or ''))
+            _xattrs[xkey] = expected_p[:utils.MAX_XATTR_SIZE]
+            expected_p = expected_p[utils.MAX_XATTR_SIZE:]
+        assert len(expected_p) <= utils.MAX_XATTR_SIZE
+        res_d = utils.read_metadata(path)
+        assert res_d == {}
+        assert _xattr_op_cnt['get'] == 3, "%r" % _xattr_op_cnt
+        assert len(_xattrs.keys()) == 0, "Expected 0 keys, found %d" % len(_xattrs.keys())
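
A closing note on the multi-key tests: the three-key expectation follows from
the size of the pickled payload relative to the per-key xattr limit. A rough
check, where the 65536-byte (64 KB) limit and pickle protocol 2 are
assumptions standing in for utils.MAX_XATTR_SIZE and utils.PICKLE_PROTOCOL:

    # Back-of-the-envelope check of the three-key expectation above.
    # MAX_XATTR_SIZE = 65536 and pickle protocol 2 are assumed values here;
    # the tests themselves use the utils module constants directly.
    import math
    import cPickle as pickle

    MAX_XATTR_SIZE = 65536
    payload = pickle.dumps({'bar': 'x' * 150000}, 2)
    chunks = int(math.ceil(len(payload) / float(MAX_XATTR_SIZE)))
    print len(payload), chunks    # slightly over 150000 bytes -> 3 chunks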