summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--configure.ac2
-rw-r--r--xlators/cluster/Makefile.am2
-rw-r--r--xlators/cluster/ec/Makefile.am3
-rw-r--r--xlators/cluster/ec/src/Makefile.am49
-rw-r--r--xlators/cluster/ec/src/ec-combine.c787
-rw-r--r--xlators/cluster/ec/src/ec-combine.h44
-rw-r--r--xlators/cluster/ec/src/ec-common.c1109
-rw-r--r--xlators/cluster/ec/src/ec-common.h105
-rw-r--r--xlators/cluster/ec/src/ec-data.c261
-rw-r--r--xlators/cluster/ec/src/ec-data.h260
-rw-r--r--xlators/cluster/ec/src/ec-dir-read.c571
-rw-r--r--xlators/cluster/ec/src/ec-dir-write.c2102
-rw-r--r--xlators/cluster/ec/src/ec-fops.h211
-rw-r--r--xlators/cluster/ec/src/ec-generic.c1660
-rw-r--r--xlators/cluster/ec/src/ec-gf.c10120
-rw-r--r--xlators/cluster/ec/src/ec-gf.h114
-rw-r--r--xlators/cluster/ec/src/ec-heal.c1470
-rw-r--r--xlators/cluster/ec/src/ec-helpers.c594
-rw-r--r--xlators/cluster/ec/src/ec-helpers.h59
-rw-r--r--xlators/cluster/ec/src/ec-inode-read.c1764
-rw-r--r--xlators/cluster/ec/src/ec-inode-write.c2235
-rw-r--r--xlators/cluster/ec/src/ec-locks.c1292
-rw-r--r--xlators/cluster/ec/src/ec-mem-types.h39
-rw-r--r--xlators/cluster/ec/src/ec-method.c182
-rw-r--r--xlators/cluster/ec/src/ec-method.h42
-rw-r--r--xlators/cluster/ec/src/ec.c904
-rw-r--r--xlators/cluster/ec/src/ec.h54
27 files changed, 26034 insertions, 1 deletions
diff --git a/configure.ac b/configure.ac
index 8ea92b988b4..d7350912fd9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -72,6 +72,8 @@ AC_CONFIG_FILES([Makefile
xlators/cluster/stripe/src/Makefile
xlators/cluster/dht/Makefile
xlators/cluster/dht/src/Makefile
+ xlators/cluster/ec/Makefile
+ xlators/cluster/ec/src/Makefile
xlators/performance/Makefile
xlators/performance/write-behind/Makefile
xlators/performance/write-behind/src/Makefile
diff --git a/xlators/cluster/Makefile.am b/xlators/cluster/Makefile.am
index 0990822a7d3..903fbb39f12 100644
--- a/xlators/cluster/Makefile.am
+++ b/xlators/cluster/Makefile.am
@@ -1,3 +1,3 @@
-SUBDIRS = stripe afr dht
+SUBDIRS = stripe afr dht ec
CLEANFILES =
diff --git a/xlators/cluster/ec/Makefile.am b/xlators/cluster/ec/Makefile.am
new file mode 100644
index 00000000000..d471a3f9243
--- /dev/null
+++ b/xlators/cluster/ec/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/cluster/ec/src/Makefile.am b/xlators/cluster/ec/src/Makefile.am
new file mode 100644
index 00000000000..e2a9330a944
--- /dev/null
+++ b/xlators/cluster/ec/src/Makefile.am
@@ -0,0 +1,49 @@
+xlator_LTLIBRARIES = ec.la
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/cluster
+
+ec_sources := ec.c
+ec_sources += ec-data.c
+ec_sources += ec-helpers.c
+ec_sources += ec-common.c
+ec_sources += ec-generic.c
+ec_sources += ec-locks.c
+ec_sources += ec-dir-read.c
+ec_sources += ec-dir-write.c
+ec_sources += ec-inode-read.c
+ec_sources += ec-inode-write.c
+ec_sources += ec-combine.c
+ec_sources += ec-gf.c
+ec_sources += ec-method.c
+ec_sources += ec-heal.c
+
+ec_headers := ec.h
+ec_headers += ec-mem-types.h
+ec_headers += ec-helpers.h
+ec_headers += ec-data.h
+ec_headers += ec-fops.h
+ec_headers += ec-common.h
+ec_headers += ec-combine.h
+ec_headers += ec-gf.h
+ec_headers += ec-method.h
+
+ec_ext_sources = $(top_builddir)/xlators/lib/src/libxlator.c
+
+ec_ext_headers = $(top_builddir)/xlators/lib/src/libxlator.h
+
+ec_la_LDFLAGS = -module -avoid-version
+ec_la_SOURCES = $(ec_sources) $(ec_headers) $(ec_ext_sources) $(ec_ext_headers)
+ec_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+
+AM_CPPFLAGS = $(GF_CPPFLAGS)
+AM_CPPFLAGS += -I$(top_srcdir)/libglusterfs/src
+AM_CPPFLAGS += -I$(top_srcdir)/xlators/lib/src
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
+
+CLEANFILES =
+
+install-data-hook:
+ ln -sf ec.so $(DESTDIR)$(xlatordir)/disperse.so
+
+uninstall-local:
+ rm -f $(DESTDIR)$(xlatordir)/disperse.so
diff --git a/xlators/cluster/ec/src/ec-combine.c b/xlators/cluster/ec/src/ec-combine.c
new file mode 100644
index 00000000000..07d819a9a3d
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-combine.c
@@ -0,0 +1,787 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <fnmatch.h>
+
+#include "libxlator.h"
+
+#include "ec-data.h"
+#include "ec-helpers.h"
+#include "ec-common.h"
+#include "ec-combine.h"
+
+struct _ec_dict_info;
+typedef struct _ec_dict_info ec_dict_info_t;
+
+struct _ec_dict_combine;
+typedef struct _ec_dict_combine ec_dict_combine_t;
+
+/* Context for dict comparison: the reference dict and the number of its
+   keys not yet matched against the other dict. */
+struct _ec_dict_info
+{
+    dict_t * dict;
+    int32_t count;
+};
+
+/* Context for dict combination: the answer being combined and which of its
+   dicts (EC_COMBINE_DICT or EC_COMBINE_XDATA) is being processed. */
+struct _ec_dict_combine
+{
+    ec_cbk_data_t * cbk;
+    int32_t which;
+};
+
+/* Keep in (*dst_sec, *dst_nsec) the most recent of the two timestamps. */
+void ec_iatt_time_merge(uint32_t * dst_sec, uint32_t * dst_nsec,
+                        uint32_t src_sec, uint32_t src_nsec)
+{
+    if (*dst_sec > src_sec)
+    {
+        return;
+    }
+    if ((*dst_sec == src_sec) && (*dst_nsec >= src_nsec))
+    {
+        return;
+    }
+
+    *dst_sec = src_sec;
+    *dst_nsec = src_nsec;
+}
+
+/* Check that 'count' pairs of iatt answers refer to the same objects and,
+   if so, merge each src entry into the corresponding dst entry.
+   Returns 1 on success, 0 if any pair mismatches. */
+int32_t ec_iatt_combine(struct iatt * dst, struct iatt * src, int32_t count)
+{
+    int32_t i;
+
+    /* Bug fix: the original loop compared dst->/src-> without indexing, so
+       only the first element was ever checked (and merged below 'count'
+       times). Index every entry explicitly. */
+    for (i = 0; i < count; i++)
+    {
+        if ((dst[i].ia_ino != src[i].ia_ino) ||
+            (dst[i].ia_uid != src[i].ia_uid) ||
+            (dst[i].ia_gid != src[i].ia_gid) ||
+            (((dst[i].ia_type == IA_IFBLK) || (dst[i].ia_type == IA_IFCHR)) &&
+             (dst[i].ia_rdev != src[i].ia_rdev)) ||
+            ((dst[i].ia_type == IA_IFREG) &&
+             (dst[i].ia_size != src[i].ia_size)) ||
+            (st_mode_from_ia(dst[i].ia_prot, dst[i].ia_type) !=
+             st_mode_from_ia(src[i].ia_prot, src[i].ia_type)) ||
+            (uuid_compare(dst[i].ia_gfid, src[i].ia_gfid) != 0))
+        {
+            gf_log(THIS->name, GF_LOG_WARNING,
+                   "Failed to combine iatt (inode: %lu-%lu, links: %u-%u, "
+                   "uid: %u-%u, gid: %u-%u, rdev: %lu-%lu, size: %lu-%lu, "
+                   "mode: %o-%o)",
+                   dst[i].ia_ino, src[i].ia_ino, dst[i].ia_nlink,
+                   src[i].ia_nlink, dst[i].ia_uid, src[i].ia_uid,
+                   dst[i].ia_gid, src[i].ia_gid, dst[i].ia_rdev,
+                   src[i].ia_rdev, dst[i].ia_size, src[i].ia_size,
+                   st_mode_from_ia(dst[i].ia_prot, dst[i].ia_type),
+                   /* bug fix: was st_mode_from_ia(src->ia_prot,
+                      dst->ia_type) — log the real src mode */
+                   st_mode_from_ia(src[i].ia_prot, src[i].ia_type));
+
+            return 0;
+        }
+    }
+
+    /* Merge each pair: accumulate the block count, keep the largest block
+       size and the most recent timestamps. */
+    while (count-- > 0)
+    {
+        dst[count].ia_blocks += src[count].ia_blocks;
+        if (dst[count].ia_blksize < src[count].ia_blksize)
+        {
+            dst[count].ia_blksize = src[count].ia_blksize;
+        }
+
+        ec_iatt_time_merge(&dst[count].ia_atime, &dst[count].ia_atime_nsec,
+                           src[count].ia_atime, src[count].ia_atime_nsec);
+        ec_iatt_time_merge(&dst[count].ia_mtime, &dst[count].ia_mtime_nsec,
+                           src[count].ia_mtime, src[count].ia_mtime_nsec);
+        ec_iatt_time_merge(&dst[count].ia_ctime, &dst[count].ia_ctime_nsec,
+                           src[count].ia_ctime, src[count].ia_ctime_nsec);
+    }
+
+    return 1;
+}
+
+/* Convert per-fragment block counts into whole-file block counts: average
+   the accumulated blocks over the number of answers and scale by the number
+   of data fragments, rounding up. */
+void ec_iatt_rebuild(ec_t * ec, struct iatt * iatt, int32_t count,
+                     int32_t answers)
+{
+    size_t blocks;
+
+    while (count-- > 0)
+    {
+        /* blocks = ceil(ia_blocks * fragments / answers) */
+        blocks = iatt[count].ia_blocks * ec->fragments + answers - 1;
+        blocks /= answers;
+        iatt[count].ia_blocks = blocks;
+    }
+}
+
+/* dict_foreach() callback: compare one key/value against the reference
+   dict stored in 'arg' (ec_dict_info_t). Returns -1 on mismatch, which
+   aborts the iteration; 0 otherwise. */
+int32_t ec_dict_data_compare(dict_t * dict, char * key, data_t * value,
+                             void * arg)
+{
+    ec_dict_info_t * info = arg;
+    data_t * data;
+
+    data = dict_get(info->dict, key);
+    if (data == NULL)
+    {
+        gf_log("ec", GF_LOG_DEBUG, "key '%s' found only on one dict", key);
+
+        return -1;
+    }
+
+    /* One more key of the reference dict has been matched. */
+    info->count--;
+
+    /* These keys are expected to differ between subvolumes and will be
+       combined later, so their contents are not compared here. */
+    if ((strcmp(key, GF_CONTENT_KEY) == 0) ||
+        (strcmp(key, GF_XATTR_PATHINFO_KEY) == 0) ||
+        (strcmp(key, GF_XATTR_USER_PATHINFO_KEY) == 0) ||
+        (strcmp(key, GF_XATTR_LOCKINFO_KEY) == 0) ||
+        (strcmp(key, GF_XATTR_CLRLK_CMD) == 0) ||
+        (strcmp(key, GLUSTERFS_OPEN_FD_COUNT) == 0) ||
+        (fnmatch(GF_XATTR_STIME_PATTERN, key, 0) == 0) ||
+        (XATTR_IS_NODE_UUID(key)))
+    {
+        return 0;
+    }
+
+    if ((data->len != value->len) ||
+        (memcmp(data->data, value->data, data->len) != 0))
+    {
+        gf_log("ec", GF_LOG_DEBUG, "key '%s' is different (size: %u, %u)",
+               key, data->len, value->len);
+
+        return -1;
+    }
+
+    return 0;
+}
+
+/* dict_foreach() callback: log keys that are missing from the reference
+   dict passed in 'arg'. Never aborts the iteration. */
+int32_t ec_dict_data_show(dict_t * dict, char * key, data_t * value,
+                          void * arg)
+{
+    dict_t * ref = arg;
+
+    if (dict_get(ref, key) != NULL)
+    {
+        return 0;
+    }
+
+    gf_log("ec", GF_LOG_DEBUG, "key '%s' found only on one dict", key);
+
+    return 0;
+}
+
+/* Compare two dicts for equivalence, ignoring keys that are expected to
+   differ between subvolumes. Either argument may be NULL. Returns 1 if
+   they match, 0 otherwise. */
+int32_t ec_dict_compare(dict_t * dict1, dict_t * dict2)
+{
+    ec_dict_info_t info;
+    dict_t * dict;
+
+    if (dict1 != NULL)
+    {
+        info.dict = dict1;
+        info.count = dict1->count;
+        dict = dict2;
+    }
+    else if (dict2 != NULL)
+    {
+        info.dict = dict2;
+        info.count = dict2->count;
+        dict = dict1;
+    }
+    else
+    {
+        /* Both NULL: trivially equal. */
+        return 1;
+    }
+
+    if (dict != NULL)
+    {
+        if (dict_foreach(dict, ec_dict_data_compare, &info) != 0)
+        {
+            return 0;
+        }
+    }
+
+    /* Any keys left uncounted exist only in the reference dict. */
+    if (info.count != 0)
+    {
+        dict_foreach(info.dict, ec_dict_data_show, dict);
+    }
+
+    return (info.count == 0);
+}
+
+/* Collect the data for 'key' from the dict (or xdata, depending on
+   'which') of each answer grouped under 'cbk' into 'list'. On input *count
+   is the capacity of 'list'; on output it is the number of entries filled.
+   Returns 1 on success, 0 on error. */
+int32_t ec_dict_list(data_t ** list, int32_t * count, ec_cbk_data_t * cbk,
+                     int32_t which, char * key)
+{
+    ec_cbk_data_t * ans;
+    dict_t * dict;
+    int32_t i, max;
+
+    max = *count;
+    i = 0;
+    for (ans = cbk; ans != NULL; ans = ans->next)
+    {
+        if (i >= max)
+        {
+            gf_log(cbk->fop->xl->name, GF_LOG_ERROR, "Unexpected number of "
+                                                     "dictionaries");
+
+            return 0;
+        }
+
+        dict = (which == EC_COMBINE_XDATA) ? ans->xdata : ans->dict;
+        list[i] = dict_get(dict, key);
+        if (list[i] == NULL)
+        {
+            gf_log(cbk->fop->xl->name, GF_LOG_ERROR, "Unexpected missing "
+                                                     "dictionary entry");
+
+            return 0;
+        }
+
+        i++;
+    }
+
+    *count = i;
+
+    return 1;
+}
+
+/* Format 'fmt' (expected shape: "<prefix>{<separator>}<suffix>") with
+   'args' and split the result in place: returns the prefix (caller must
+   GF_FREE it) and points *sep / *post into the same allocation at the
+   separator and suffix. Returns NULL on allocation failure or if the
+   '{'/'}' markers are missing. */
+char * ec_concat_prepare(xlator_t * xl, char ** sep, char ** post,
+                         const char * fmt, va_list args)
+{
+    char * str, * tmp;
+    int32_t len;
+
+    len = gf_vasprintf(&str, fmt, args);
+    if (len < 0)
+    {
+        return NULL;
+    }
+
+    tmp = strchr(str, '{');
+    if (tmp == NULL)
+    {
+        goto out;
+    }
+    /* Terminate the prefix and remember where the separator starts. */
+    *tmp++ = 0;
+    *sep = tmp;
+    tmp = strchr(tmp, '}');
+    if (tmp == NULL)
+    {
+        goto out;
+    }
+    /* Terminate the separator; the suffix follows the '}'. */
+    *tmp++ = 0;
+    *post = tmp;
+
+    return str;
+
+out:
+    gf_log(xl->name, GF_LOG_ERROR, "Invalid concat format");
+
+    GF_FREE(str);
+
+    return NULL;
+}
+
+/* Build a single string combining the values of 'key' from all grouped
+   answers and store it back under 'key'. 'fmt' has the shape
+   "<prefix>{<separator>}<suffix>"; each stored value (assumed to include a
+   trailing NUL, hence 'len - 1') is joined with the separator. Returns 0
+   on success, -1 on error. */
+int32_t ec_dict_data_concat(const char * fmt, ec_cbk_data_t * cbk,
+                            int32_t which, char * key, ...)
+{
+    data_t * data[cbk->count];
+    size_t len, tmp;
+    char * str = NULL, * pre = NULL, * sep, * post;
+    dict_t * dict;
+    va_list args;
+    int32_t i, num, prelen, postlen, seplen;
+    int32_t ret = -1;
+
+    num = cbk->count;
+    if (!ec_dict_list(data, &num, cbk, which, key))
+    {
+        return -1;
+    }
+
+    va_start(args, key);
+    pre = ec_concat_prepare(cbk->fop->xl, &sep, &post, fmt, args);
+    va_end(args);
+
+    if (pre == NULL)
+    {
+        return -1;
+    }
+
+    prelen = strlen(pre);
+    seplen = strlen(sep);
+    postlen = strlen(post);
+
+    /* The buffer holds num - 1 separators (one between each pair). */
+    len = prelen + (num - 1) * seplen + postlen + 1;
+    for (i = 0; i < num; i++)
+    {
+        len += data[i]->len - 1;
+    }
+
+    str = GF_MALLOC(len, gf_common_mt_char);
+    if (str == NULL)
+    {
+        goto out;
+    }
+
+    memcpy(str, pre, prelen);
+    len = prelen;
+    for (i = 0; i < num; i++)
+    {
+        /* Bug fix: the separator was copied before every element, writing
+           num separators into a buffer sized for num - 1 (heap overflow by
+           seplen bytes) and producing a spurious leading separator. Copy
+           it only between elements. */
+        if (i > 0)
+        {
+            memcpy(str + len, sep, seplen);
+            len += seplen;
+        }
+        tmp = data[i]->len - 1;
+        memcpy(str + len, data[i]->data, tmp);
+        len += tmp;
+    }
+    /* postlen + 1 also copies the terminating NUL. */
+    memcpy(str + len, post, postlen + 1);
+
+    dict = (which == EC_COMBINE_XDATA) ? cbk->xdata : cbk->dict;
+    if (dict_set_dynstr(dict, key, str) != 0)
+    {
+        goto out;
+    }
+
+    /* Ownership of 'str' transferred to the dict. */
+    str = NULL;
+
+    ret = 0;
+
+out:
+    GF_FREE(str);
+    GF_FREE(pre);
+
+    return ret;
+}
+
+/* Merge the serialized lockinfo dicts stored under 'key' in every grouped
+   answer into a single dict, reserialize it and store it back under 'key'.
+   Returns 0 on success, -1 on error. */
+int32_t ec_dict_data_merge(ec_cbk_data_t * cbk, int32_t which, char * key)
+{
+    data_t * data[cbk->count];
+    dict_t * dict, * lockinfo, * tmp;
+    char * ptr = NULL;
+    int32_t i, num, len;
+    int32_t ret = -1;
+
+    num = cbk->count;
+    if (!ec_dict_list(data, &num, cbk, which, key))
+    {
+        return -1;
+    }
+
+    /* The first answer seeds the merged dict. */
+    if (dict_unserialize(data[0]->data, data[0]->len, &lockinfo) != 0)
+    {
+        return -1;
+    }
+
+    /* Overlay every other answer's lockinfo on top of it. */
+    for (i = 1; i < num; i++)
+    {
+        if (dict_unserialize(data[i]->data, data[i]->len, &tmp) != 0)
+        {
+            goto out;
+        }
+        if (dict_copy(tmp, lockinfo) == NULL)
+        {
+            dict_unref(tmp);
+
+            goto out;
+        }
+
+        dict_unref(tmp);
+    }
+
+    len = dict_serialized_length(lockinfo);
+    if (len < 0)
+    {
+        goto out;
+    }
+    ptr = GF_MALLOC(len, gf_common_mt_char);
+    if (ptr == NULL)
+    {
+        goto out;
+    }
+    if (dict_serialize(lockinfo, ptr) != 0)
+    {
+        goto out;
+    }
+    dict = (which == EC_COMBINE_XDATA) ? cbk->xdata : cbk->dict;
+    if (dict_set_dynptr(dict, key, ptr, len) != 0)
+    {
+        goto out;
+    }
+
+    /* Ownership of 'ptr' transferred to the dict. */
+    ptr = NULL;
+
+    ret = 0;
+
+out:
+    GF_FREE(ptr);
+    dict_unref(lockinfo);
+
+    return ret;
+}
+
+/* For node-uuid style keys, make the result deterministic by keeping the
+   value coming from the answer with the lowest subvolume index. Returns 0
+   on success, -1 on error. */
+int32_t ec_dict_data_uuid(ec_cbk_data_t * cbk, int32_t which, char * key)
+{
+    ec_cbk_data_t * ans, * min;
+    dict_t * src, * dst;
+    data_t * data;
+
+    /* Find the grouped answer with the smallest index. */
+    min = cbk;
+    for (ans = cbk->next; ans != NULL; ans = ans->next)
+    {
+        if (ans->idx < min->idx)
+        {
+            min = ans;
+        }
+    }
+
+    /* Only copy if the winner is not already the main answer. */
+    if (min != cbk)
+    {
+        src = (which == EC_COMBINE_XDATA) ? min->xdata : min->dict;
+        dst = (which == EC_COMBINE_XDATA) ? cbk->xdata : cbk->dict;
+
+        data = dict_get(src, key);
+        if (data == NULL)
+        {
+            return -1;
+        }
+        if (dict_set(dst, key, data) != 0)
+        {
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+/* Store under 'key' the maximum uint32 value found for that key across all
+   grouped answers. Returns 0 on success, -1 on error. */
+int32_t ec_dict_data_max(ec_cbk_data_t * cbk, int32_t which, char * key)
+{
+    data_t * data[cbk->count];
+    dict_t * dict;
+    int32_t i, num;
+    uint32_t best, value;
+
+    num = cbk->count;
+    if (!ec_dict_list(data, &num, cbk, which, key))
+    {
+        return -1;
+    }
+
+    /* A single answer already holds the maximum. */
+    if (num <= 1)
+    {
+        return 0;
+    }
+
+    best = data_to_uint32(data[0]);
+    for (i = 1; i < num; i++)
+    {
+        value = data_to_uint32(data[i]);
+        if (value > best)
+        {
+            best = value;
+        }
+    }
+
+    dict = (which == EC_COMBINE_XDATA) ? cbk->xdata : cbk->dict;
+
+    return (dict_set_uint32(dict, key, best) == 0) ? 0 : -1;
+}
+
+/* Combine stime xattrs: fold every other answer's value into the main
+   answer's dict keeping the maximum stime. Returns 0 on success, -1 on
+   error. */
+int32_t ec_dict_data_stime(ec_cbk_data_t * cbk, int32_t which, char * key)
+{
+    data_t * data[cbk->count];
+    dict_t * dict;
+    int32_t i, num;
+
+    num = cbk->count;
+    if (!ec_dict_list(data, &num, cbk, which, key))
+    {
+        return -1;
+    }
+
+    dict = (which == EC_COMBINE_XDATA) ? cbk->xdata : cbk->dict;
+    for (i = 1; i < num; i++)
+    {
+        if (gf_get_max_stime(cbk->fop->xl, dict, key, data[i]) != 0)
+        {
+            gf_log(cbk->fop->xl->name, GF_LOG_ERROR, "STIME combination "
+                                                     "failed");
+
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+/* dict_foreach() callback: dispatch each special key to its combination
+   routine (concatenation, merge, max, uuid selection or stime). Keys not
+   listed here need no combination. Returns 0 on success, negative to abort
+   the iteration. */
+int32_t ec_dict_data_combine(dict_t * dict, char * key, data_t * value,
+                             void * arg)
+{
+    ec_dict_combine_t * data = arg;
+
+    if ((strcmp(key, GF_XATTR_PATHINFO_KEY) == 0) ||
+        (strcmp(key, GF_XATTR_USER_PATHINFO_KEY) == 0))
+    {
+        return ec_dict_data_concat("(<EC:%s> { })", data->cbk, data->which,
+                                   key, data->cbk->fop->xl->name);
+    }
+
+    if (strncmp(key, GF_XATTR_CLRLK_CMD, strlen(GF_XATTR_CLRLK_CMD)) == 0)
+    {
+        return ec_dict_data_concat("{\n}", data->cbk, data->which, key);
+    }
+
+    if (strncmp(key, GF_XATTR_LOCKINFO_KEY,
+                strlen(GF_XATTR_LOCKINFO_KEY)) == 0)
+    {
+        return ec_dict_data_merge(data->cbk, data->which, key);
+    }
+
+    if (strcmp(key, GLUSTERFS_OPEN_FD_COUNT) == 0)
+    {
+        return ec_dict_data_max(data->cbk, data->which, key);
+    }
+
+    if (XATTR_IS_NODE_UUID(key))
+    {
+        return ec_dict_data_uuid(data->cbk, data->which, key);
+    }
+
+    if (fnmatch(GF_XATTR_STIME_PATTERN, key, FNM_NOESCAPE) == 0)
+    {
+        return ec_dict_data_stime(data->cbk, data->which, key);
+    }
+
+    return 0;
+}
+
+/* Run the per-key combination over the chosen dict (dict or xdata) of the
+   main answer. Returns 1 on success, 0 on failure. */
+int32_t ec_dict_combine(ec_cbk_data_t * cbk, int32_t which)
+{
+    ec_dict_combine_t data = { .cbk = cbk, .which = which };
+    dict_t * dict;
+
+    dict = (which == EC_COMBINE_XDATA) ? cbk->xdata : cbk->dict;
+    if (dict == NULL)
+    {
+        /* Nothing to combine. */
+        return 1;
+    }
+
+    if (dict_foreach(dict, ec_dict_data_combine, &data) == 0)
+    {
+        return 1;
+    }
+
+    gf_log(cbk->fop->xl->name, GF_LOG_ERROR, "Dictionary combination "
+                                             "failed");
+
+    return 0;
+}
+
+/* Two io-vectors are considered equal when their total byte lengths
+   match. Returns 1 if equal, 0 otherwise. */
+int32_t ec_vector_compare(struct iovec * dst_vector, int32_t dst_count,
+                          struct iovec * src_vector, int32_t src_count)
+{
+    size_t size_a, size_b;
+
+    size_a = (dst_count > 0) ? iov_length(dst_vector, dst_count) : 0;
+    size_b = (src_count > 0) ? iov_length(src_vector, src_count) : 0;
+
+    return (size_a == size_b);
+}
+
+/* Returns 1 when both structures describe exactly the same lock request
+   (all fields, including the lock owner, must match), 0 otherwise. */
+int32_t ec_flock_compare(struct gf_flock * dst, struct gf_flock * src)
+{
+    return (dst->l_type == src->l_type) &&
+           (dst->l_whence == src->l_whence) &&
+           (dst->l_start == src->l_start) &&
+           (dst->l_len == src->l_len) &&
+           (dst->l_pid == src->l_pid) &&
+           is_same_lkowner(&dst->l_owner, &src->l_owner);
+}
+
+/* Combine two statvfs answers into 'dst', normalizing block counts to the
+   largest fragment size and keeping the most restrictive free-space
+   values. NOTE: may also scale counters inside 'src' (when dst's fragment
+   size is the larger one). */
+void ec_statvfs_combine(struct statvfs * dst, struct statvfs * src)
+{
+    if (dst->f_bsize < src->f_bsize)
+    {
+        dst->f_bsize = src->f_bsize;
+    }
+
+    /* Re-express block counts of the side with the smaller fragment size
+       in units of the larger one. */
+    if (dst->f_frsize < src->f_frsize)
+    {
+        dst->f_blocks *= dst->f_frsize;
+        dst->f_blocks /= src->f_frsize;
+
+        dst->f_bfree *= dst->f_frsize;
+        dst->f_bfree /= src->f_frsize;
+
+        dst->f_bavail *= dst->f_frsize;
+        dst->f_bavail /= src->f_frsize;
+
+        dst->f_frsize = src->f_frsize;
+    }
+    else if (dst->f_frsize > src->f_frsize)
+    {
+        src->f_blocks *= src->f_frsize;
+        src->f_blocks /= dst->f_frsize;
+
+        src->f_bfree *= src->f_frsize;
+        src->f_bfree /= dst->f_frsize;
+
+        src->f_bavail *= src->f_frsize;
+        src->f_bavail /= dst->f_frsize;
+    }
+    /* Keep the minimum capacity/free counts across subvolumes. */
+    if (dst->f_blocks > src->f_blocks)
+    {
+        dst->f_blocks = src->f_blocks;
+    }
+    if (dst->f_bfree > src->f_bfree)
+    {
+        dst->f_bfree = src->f_bfree;
+    }
+    if (dst->f_bavail > src->f_bavail)
+    {
+        dst->f_bavail = src->f_bavail;
+    }
+
+    if (dst->f_files < src->f_files)
+    {
+        dst->f_files = src->f_files;
+    }
+    if (dst->f_ffree > src->f_ffree)
+    {
+        dst->f_ffree = src->f_ffree;
+    }
+    if (dst->f_favail > src->f_favail)
+    {
+        dst->f_favail = src->f_favail;
+    }
+    if (dst->f_namemax > src->f_namemax)
+    {
+        dst->f_namemax = src->f_namemax;
+    }
+
+    if (dst->f_flag != src->f_flag)
+    {
+        gf_log(THIS->name, GF_LOG_DEBUG, "Mismatching file system flags "
+                                         "(%lX, %lX)",
+               dst->f_flag, src->f_flag);
+    }
+    /* Only flags common to both subvolumes are reported. */
+    dst->f_flag &= src->f_flag;
+}
+
+/* Decide whether two answers are compatible: same return code, same errno
+   on failure, equivalent xdata, and (on success) whatever the fop-specific
+   'combine' callback requires. Returns 1 if compatible, 0 otherwise. */
+int32_t ec_combine_check(ec_cbk_data_t * dst, ec_cbk_data_t * src,
+                         ec_combine_f combine)
+{
+    ec_fop_data_t * fop = dst->fop;
+
+    if (dst->op_ret != src->op_ret)
+    {
+        gf_log(fop->xl->name, GF_LOG_DEBUG, "Mismatching return code in "
+                                            "answers of '%s': %d <-> %d",
+               ec_fop_name(fop->id), dst->op_ret, src->op_ret);
+
+        return 0;
+    }
+    if (dst->op_ret < 0)
+    {
+        if (dst->op_errno != src->op_errno)
+        {
+            gf_log(fop->xl->name, GF_LOG_DEBUG, "Mismatching errno code in "
+                                                "answers of '%s': %d <-> %d",
+                   ec_fop_name(fop->id), dst->op_errno, src->op_errno);
+
+            return 0;
+        }
+    }
+
+    if (!ec_dict_compare(dst->xdata, src->xdata))
+    {
+        gf_log(fop->xl->name, GF_LOG_WARNING, "Mismatching xdata in answers "
+                                              "of '%s'",
+               ec_fop_name(fop->id));
+
+        return 0;
+    }
+
+    /* Fop-specific comparison only makes sense on successful answers. */
+    if ((dst->op_ret >= 0) && (combine != NULL))
+    {
+        return combine(fop, dst, src);
+    }
+
+    return 1;
+}
+
+/* Merge a freshly received answer into the fop's answer list. Compatible
+   answers are grouped (their counts and masks accumulated) and the list is
+   kept sorted by descending count. When enough matching answers arrive the
+   fop is reported; otherwise more subvolumes may be dispatched. */
+void ec_combine(ec_cbk_data_t * cbk, ec_combine_f combine)
+{
+    ec_fop_data_t * fop = cbk->fop;
+    ec_cbk_data_t * ans = NULL, * tmp = NULL;
+    struct list_head * item = NULL;
+    int32_t needed = 0, report = 0;
+    char str[32];
+
+    LOCK(&fop->lock);
+
+    item = fop->cbk_list.prev;
+    list_for_each_entry(ans, &fop->cbk_list, list)
+    {
+        if (ec_combine_check(cbk, ans, combine))
+        {
+            /* Absorb the matching group into the new answer. */
+            cbk->count += ans->count;
+            cbk->mask |= ans->mask;
+
+            /* Walk backwards to find the insertion point that keeps the
+               list ordered by descending count. */
+            item = ans->list.prev;
+            while (item != &fop->cbk_list)
+            {
+                tmp = list_entry(item, ec_cbk_data_t, list);
+                if (tmp->count >= cbk->count)
+                {
+                    break;
+                }
+                item = item->prev;
+            }
+            list_del(&ans->list);
+
+            /* Chain the absorbed group behind the new answer. */
+            cbk->next = ans;
+
+            break;
+        }
+    }
+    list_add(&cbk->list, item);
+
+    ec_trace("ANSWER", fop, "combine=%s[%d]",
+             ec_bin(str, sizeof(str), cbk->mask, 0), cbk->count);
+
+    if ((cbk->count == fop->expected) && (fop->answer == NULL))
+    {
+        /* Enough matching answers: this becomes the final answer. */
+        fop->answer = cbk;
+
+        ec_update_bad(fop, cbk->mask);
+
+        report = 1;
+    }
+
+    /* Check whether the best group can still reach 'minimum' with the
+       winds currently outstanding. */
+    ans = list_entry(fop->cbk_list.next, ec_cbk_data_t, list);
+    needed = fop->minimum - ans->count - fop->winds + 1;
+
+    UNLOCK(&fop->lock);
+
+    if (needed > 0)
+    {
+        ec_dispatch_next(fop, cbk->idx);
+    }
+    else if (report)
+    {
+        ec_report(fop, 0);
+    }
+}
diff --git a/xlators/cluster/ec/src/ec-combine.h b/xlators/cluster/ec/src/ec-combine.h
new file mode 100644
index 00000000000..0cf5a91dc03
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-combine.h
@@ -0,0 +1,44 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __EC_COMBINE_H__
+#define __EC_COMBINE_H__
+
+#define EC_COMBINE_DICT 0
+#define EC_COMBINE_XDATA 1
+
+typedef int32_t (* ec_combine_f)(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src);
+
+void ec_iatt_rebuild(ec_t * ec, struct iatt * iatt, int32_t count,
+ int32_t answers);
+
+int32_t ec_iatt_combine(struct iatt * dst, struct iatt * src, int32_t count);
+int32_t ec_dict_compare(dict_t * dict1, dict_t * dict2);
+int32_t ec_vector_compare(struct iovec * dst_vector, int32_t dst_count,
+ struct iovec * src_vector, int32_t src_count);
+int32_t ec_flock_compare(struct gf_flock * dst, struct gf_flock * src);
+void ec_statvfs_combine(struct statvfs * dst, struct statvfs * src);
+
+int32_t ec_dict_combine(ec_cbk_data_t * cbk, int32_t which);
+
+void ec_combine(ec_cbk_data_t * cbk, ec_combine_f combine);
+
+#endif /* __EC_COMBINE_H__ */
diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
new file mode 100644
index 00000000000..a4423d94aa9
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-common.c
@@ -0,0 +1,1109 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include "byte-order.h"
+
+#include "ec-mem-types.h"
+#include "ec-data.h"
+#include "ec-helpers.h"
+#include "ec-combine.h"
+#include "ec-common.h"
+#include "ec-fops.h"
+#include "ec.h"
+
+/* A child is usable for dispatch when its index exists and its bit is
+   still set in the fop's remaining mask. */
+int32_t ec_child_valid(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+    if (idx >= ec->nodes)
+    {
+        return 0;
+    }
+
+    return (fop->remaining >> idx) & 1;
+}
+
+/* Find the next valid child starting at 'idx', wrapping around at
+   ec->nodes. Returns -1 if a full cycle reaches fop->first without finding
+   one. */
+int32_t ec_child_next(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+    while (!ec_child_valid(ec, fop, idx))
+    {
+        if (++idx >= ec->nodes)
+        {
+            idx = 0;
+        }
+        if (idx == fop->first)
+        {
+            return -1;
+        }
+    }
+
+    return idx;
+}
+
+/* Returns the mask of subvolumes NOT known to be bad for this inode (all
+   bits set when no context exists yet). */
+uintptr_t ec_inode_good(inode_t * inode, xlator_t * xl)
+{
+    ec_inode_t * ctx = ec_inode_get(inode, xl);
+
+    if (ctx == NULL)
+    {
+        return ~(uintptr_t)0;
+    }
+
+    return ~ctx->bad;
+}
+
+/* Returns the mask of subvolumes NOT known to be bad for this fd. The
+   context is only trusted once fully initialized (loc.inode set). */
+uintptr_t ec_fd_good(fd_t * fd, xlator_t * xl)
+{
+    ec_fd_t * ctx = ec_fd_get(fd, xl);
+
+    if ((ctx == NULL) || (ctx->loc.inode == NULL))
+    {
+        return ~(uintptr_t)0;
+    }
+
+    return ~ctx->bad;
+}
+
+/* Update the bad-subvolume mask stored in the inode context: clear bits
+   proven good, add the new bad bits, and return the accumulated bad mask.
+   'inode' may be NULL (no-op, returns 'bad' unchanged). */
+uintptr_t ec_update_inode(ec_fop_data_t * fop, inode_t * inode, uintptr_t good,
+                          uintptr_t bad)
+{
+    ec_inode_t * ctx = NULL;
+
+    if (inode != NULL)
+    {
+        LOCK(&inode->lock);
+
+        ctx = __ec_inode_get(inode, fop->xl);
+        if (ctx != NULL)
+        {
+            ctx->bad &= ~good;
+            bad |= ctx->bad;
+            ctx->bad = bad;
+        }
+
+        UNLOCK(&inode->lock);
+    }
+
+    return bad;
+}
+
+/* Same as ec_update_inode() but for the fd context. The context is only
+   touched once fully initialized (loc.inode set). Returns the accumulated
+   bad mask. */
+uintptr_t ec_update_fd(ec_fop_data_t * fop, fd_t * fd, uintptr_t good,
+                       uintptr_t bad)
+{
+    ec_fd_t * ctx = NULL;
+
+    LOCK(&fd->lock);
+
+    ctx = __ec_fd_get(fd, fop->xl);
+    if ((ctx != NULL) && (ctx->loc.inode != NULL))
+    {
+        ctx->bad &= ~good;
+        bad |= ctx->bad;
+        ctx->bad = bad;
+    }
+
+    UNLOCK(&fd->lock);
+
+    return bad;
+}
+
+/* Callback for the self-heals launched by ec_check_status(): only logs the
+   outcome. */
+int32_t ec_heal_report(call_frame_t * frame, void * cookie, xlator_t * this,
+                       int32_t op_ret, int32_t op_errno, uintptr_t mask,
+                       uintptr_t good, uintptr_t bad, dict_t * xdata)
+{
+    if (op_ret < 0)
+    {
+        gf_log(this->name, GF_LOG_WARNING, "Heal failed (error %d)", op_errno);
+    }
+    else
+    {
+        /* NOTE(review): the counts log subvolumes outside good/bad vs.
+           outside good — confirm these match the intended "healed/total"
+           semantics. */
+        gf_log(this->name, GF_LOG_INFO, "Heal succeeded on %d/%d subvolumes",
+               ec_bits_count(mask & ~ (good | bad)),
+               ec_bits_count(mask & ~good));
+    }
+
+    return 0;
+}
+
+/* If the fop failed on some subvolume that is up, log a warning and
+   trigger a background self-heal on the involved fd or loc(s). */
+void ec_check_status(ec_fop_data_t * fop)
+{
+    ec_t * ec = fop->xl->private;
+
+    /* Every up subvolume either still pending or good: nothing to heal. */
+    if ((ec->xl_up & ~(fop->remaining | fop->good)) == 0)
+    {
+        return;
+    }
+
+    gf_log(fop->xl->name, GF_LOG_WARNING, "Operation failed on some "
+                                          "subvolumes (up=%lX, mask=%lX, "
+                                          "remaining=%lX, good=%lX, bad=%lX)",
+           ec->xl_up, fop->mask, fop->remaining, fop->good, fop->bad);
+
+    if (fop->fd != NULL)
+    {
+        ec_fheal(fop->frame, fop->xl, -1, EC_MINIMUM_ONE, ec_heal_report, NULL,
+                 fop->fd, NULL);
+    }
+    else
+    {
+        ec_heal(fop->frame, fop->xl, -1, EC_MINIMUM_ONE, ec_heal_report, NULL,
+                &fop->loc[0], NULL);
+
+        /* Fops like rename involve a second location. */
+        if (fop->loc[1].inode != NULL)
+        {
+            ec_heal(fop->frame, fop->xl, -1, EC_MINIMUM_ONE, ec_heal_report,
+                    NULL, &fop->loc[1], NULL);
+        }
+    }
+}
+
+/* Record which subvolumes answered good/bad for this fop and, for
+   top-level fops, propagate the result to the inode/fd contexts selected
+   by the fop's EC_FLAG_UPDATE_* flags, then check if healing is needed. */
+void ec_update_bad(ec_fop_data_t * fop, uintptr_t good)
+{
+    uintptr_t bad;
+    int32_t update = 0;
+
+    /* Bad = dispatched subvolumes that neither are pending nor good. */
+    bad = fop->mask & ~(fop->remaining | good);
+    if ((fop->bad & bad) != bad)
+    {
+        fop->bad |= bad;
+        update = 1;
+    }
+    if ((fop->good & good) != good)
+    {
+        fop->good |= good;
+        update = 1;
+    }
+
+    /* Only top-level fops (no parent) update the shared contexts. */
+    if (update && (fop->parent == NULL))
+    {
+        if ((fop->flags & EC_FLAG_UPDATE_LOC_PARENT) != 0)
+        {
+            ec_update_inode(fop, fop->loc[0].parent, good, bad);
+        }
+        if ((fop->flags & EC_FLAG_UPDATE_LOC_INODE) != 0)
+        {
+            ec_update_inode(fop, fop->loc[0].inode, good, bad);
+        }
+        ec_update_inode(fop, fop->loc[1].inode, good, bad);
+        if ((fop->flags & EC_FLAG_UPDATE_FD_INODE) != 0)
+        {
+            ec_update_inode(fop, fop->fd->inode, good, bad);
+        }
+        if ((fop->flags & EC_FLAG_UPDATE_FD) != 0)
+        {
+            ec_update_fd(fop, fop->fd, good, bad);
+        }
+
+        ec_check_status(fop);
+    }
+}
+
+
+void __ec_fop_set_error(ec_fop_data_t * fop, int32_t error)
+{
+ if ((error != 0) && (fop->error == 0))
+ {
+ fop->error = error;
+ }
+}
+
+/* Locked wrapper around __ec_fop_set_error(). */
+void ec_fop_set_error(ec_fop_data_t * fop, int32_t error)
+{
+    LOCK(&fop->lock);
+    __ec_fop_set_error(fop, error);
+    UNLOCK(&fop->lock);
+}
+
+/* If the fop still has pending jobs, register 'resume' to be called when
+   they finish and return -1. Otherwise return (and clear) the fop's stored
+   error code. */
+int32_t ec_check_complete(ec_fop_data_t * fop, ec_resume_f resume)
+{
+    int32_t error = -1;
+
+    LOCK(&fop->lock);
+
+    GF_ASSERT(fop->resume == NULL);
+
+    if (fop->jobs != 0)
+    {
+        ec_trace("WAIT", fop, "resume=%p", resume);
+
+        fop->resume = resume;
+    }
+    else
+    {
+        error = fop->error;
+        fop->error = 0;
+    }
+
+    UNLOCK(&fop->lock);
+
+    return error;
+}
+
+/* If there are outstanding winds, take an extra job and reference so the
+   fop will not resume until all winds have completed (flagged via
+   EC_FLAG_WAITING_WINDS, released in ec_complete()). */
+void ec_wait_winds(ec_fop_data_t * fop)
+{
+    LOCK(&fop->lock);
+
+    if (fop->winds > 0)
+    {
+        fop->jobs++;
+        fop->refs++;
+
+        fop->flags |= EC_FLAG_WAITING_WINDS;
+    }
+
+    UNLOCK(&fop->lock);
+}
+
+/* Complete one job of the fop. When the last job finishes, call the
+   registered resume function (if any) with the first recorded error.
+   Always releases one fop reference. */
+void ec_resume(ec_fop_data_t * fop, int32_t error)
+{
+    ec_resume_f resume = NULL;
+
+    LOCK(&fop->lock);
+
+    __ec_fop_set_error(fop, error);
+
+    if (--fop->jobs == 0)
+    {
+        resume = fop->resume;
+        fop->resume = NULL;
+        if (resume != NULL)
+        {
+            ec_trace("RESUME", fop, "error=%d", error);
+
+            /* Prefer the first error recorded on the fop. */
+            if (fop->error != 0)
+            {
+                error = fop->error;
+            }
+            fop->error = 0;
+        }
+    }
+
+    UNLOCK(&fop->lock);
+
+    /* Call outside the lock to avoid re-entrancy under it. */
+    if (resume != NULL)
+    {
+        resume(fop, error);
+    }
+
+    ec_fop_data_release(fop);
+}
+
+void ec_resume_parent(ec_fop_data_t * fop, int32_t error)
+{
+ ec_fop_data_t * parent;
+
+ parent = fop->parent;
+ if (parent != NULL)
+ {
+ fop->parent = NULL;
+ ec_resume(parent, error);
+ }
+}
+
+/* Report the fop's result: restore the frame's lock owner when locks are
+   held, then resume the fop. */
+void ec_report(ec_fop_data_t * fop, int32_t error)
+{
+    if (!list_empty(&fop->lock_list))
+    {
+        ec_owner_set(fop->frame, fop->frame->root);
+    }
+
+    ec_resume(fop, error);
+}
+
+/* Called when one wind of the fop completes. When the last wind finishes
+   without a chosen answer, pick the best available answer (if acceptable)
+   and report; if the fop was waiting for winds, resume it instead. Always
+   releases one fop reference. */
+void ec_complete(ec_fop_data_t * fop)
+{
+    ec_cbk_data_t * cbk = NULL;
+    int32_t ready = 0, report = 0;
+
+    LOCK(&fop->lock);
+
+    ec_trace("COMPLETE", fop, "");
+
+    if (--fop->winds == 0)
+    {
+        if ((fop->answer == NULL) && (fop->expected != 1))
+        {
+            if (!list_empty(&fop->cbk_list))
+            {
+                /* The list head is the group with the largest count. */
+                cbk = list_entry(fop->cbk_list.next, ec_cbk_data_t, list);
+                if ((cbk->count >= fop->minimum) &&
+                    ((cbk->op_ret >= 0) || (cbk->op_errno != ENOTCONN)))
+                {
+                    fop->answer = cbk;
+
+                    ec_update_bad(fop, cbk->mask);
+                }
+            }
+
+            report = 1;
+        }
+        else if ((fop->flags & EC_FLAG_WAITING_WINDS) != 0)
+        {
+            /* ec_wait_winds() took an extra job: release it now. */
+            ready = 1;
+        }
+    }
+
+    UNLOCK(&fop->lock);
+
+    if (report)
+    {
+        ec_report(fop, 0);
+    }
+    if (ready)
+    {
+        ec_resume(fop, 0);
+    }
+
+    ec_fop_data_release(fop);
+}
+
+/* Compute the set of subvolumes a fop will be dispatched to, restricting
+   it to subvolumes that are up and not known to be bad for the inodes/fd
+   involved, and translate the symbolic 'minimum' into a real count.
+   Returns 1 if enough childs are available, 0 otherwise. */
+int32_t ec_child_select(ec_fop_data_t * fop)
+{
+    ec_t * ec = fop->xl->private;
+    uintptr_t mask = 0;
+    int32_t first = 0, num = 0;
+
+    fop->mask &= ec->node_mask;
+
+    mask = ec->xl_up;
+    if (fop->loc[0].inode != NULL)
+    {
+        /* Bug fix: good masks must be intersected ('&='), not merged
+           ('|='). ec_inode_good()/ec_fd_good() return the set of
+           subvolumes NOT known to be bad, so OR-ing them into 'mask' would
+           re-enable down or bad subvolumes. */
+        mask &= ec_inode_good(fop->loc[0].inode, fop->xl);
+    }
+    if (fop->loc[1].inode != NULL)
+    {
+        mask &= ec_inode_good(fop->loc[1].inode, fop->xl);
+    }
+    if (fop->fd != NULL)
+    {
+        if (fop->fd->inode != NULL)
+        {
+            mask &= ec_inode_good(fop->fd->inode, fop->xl);
+        }
+        mask &= ec_fd_good(fop->fd, fop->xl);
+    }
+    if ((fop->mask & ~mask) != 0)
+    {
+        gf_log(fop->xl->name, GF_LOG_WARNING, "Executing operation with "
+                                              "some subvolumes unavailable "
+                                              "(%lX)", fop->mask & ~mask);
+
+        fop->mask &= mask;
+    }
+
+    switch (fop->minimum)
+    {
+        case EC_MINIMUM_ALL:
+            fop->minimum = ec_bits_count(fop->mask);
+            if (fop->minimum >= ec->fragments)
+            {
+                break;
+            }
+            /* Fallthrough: not enough for 'all', degrade to 'min'. */
+        case EC_MINIMUM_MIN:
+            fop->minimum = ec->fragments;
+            break;
+        case EC_MINIMUM_ONE:
+            fop->minimum = 1;
+    }
+
+    /* Round-robin the starting subvolume to spread load. */
+    first = ec->idx;
+    if (++first >= ec->nodes)
+    {
+        first = 0;
+    }
+    ec->idx = first;
+
+    fop->remaining = fop->mask;
+
+    ec_trace("SELECT", fop, "");
+
+    num = ec_bits_count(fop->mask);
+    if ((num < fop->minimum) && (num < ec->fragments))
+    {
+        gf_log(ec->xl->name, GF_LOG_ERROR, "Insufficient available childs "
+                                           "for this request (have %d, need "
+                                           "%d)", num, fop->minimum);
+
+        return 0;
+    }
+
+    LOCK(&fop->lock);
+
+    fop->jobs++;
+    fop->refs++;
+
+    UNLOCK(&fop->lock);
+
+    return 1;
+}
+
+/* Dispatch the fop to the next available child at or after 'idx'. Returns
+   the index actually used, or -1 if no child remains. */
+int32_t ec_dispatch_next(ec_fop_data_t * fop, int32_t idx)
+{
+    ec_t * ec = fop->xl->private;
+
+    LOCK(&fop->lock);
+
+    idx = ec_child_next(ec, fop, idx);
+    if (idx >= 0)
+    {
+        /* Clear the child's bit from the remaining mask. */
+        fop->remaining ^= 1ULL << idx;
+
+        ec_trace("EXECUTE", fop, "idx=%d", idx);
+
+        fop->winds++;
+        fop->refs++;
+    }
+
+    UNLOCK(&fop->lock);
+
+    /* Wind outside the lock. */
+    if (idx >= 0)
+    {
+        fop->wind(ec, fop, idx);
+    }
+
+    return idx;
+}
+
+/* Dispatch the fop to every child whose bit is set in 'mask'. The caller
+   guarantees 'mask' is a subset of fop->remaining. */
+void ec_dispatch_mask(ec_fop_data_t * fop, uintptr_t mask)
+{
+    ec_t * ec = fop->xl->private;
+    int32_t count, idx;
+
+    count = ec_bits_count(mask);
+
+    LOCK(&fop->lock);
+
+    ec_trace("EXECUTE", fop, "mask=%lX", mask);
+
+    fop->remaining ^= mask;
+
+    fop->winds += count;
+    fop->refs += count;
+
+    UNLOCK(&fop->lock);
+
+    /* Wind to each selected child outside the lock. */
+    idx = 0;
+    while (mask != 0)
+    {
+        if ((mask & 1) != 0)
+        {
+            fop->wind(ec, fop, idx);
+        }
+        idx++;
+        mask >>= 1;
+    }
+}
+
+/* Reset per-dispatch state before (re)sending a fop to its subvolumes. */
+void ec_dispatch_start(ec_fop_data_t * fop)
+{
+    fop->answer = NULL;
+    fop->good = 0;
+    fop->bad = 0;
+
+    INIT_LIST_HEAD(&fop->cbk_list);
+
+    /* When locks are held, reuse the lock owner of the original request so
+       lock operations match. */
+    if (!list_empty(&fop->lock_list))
+    {
+        ec_owner_copy(fop->frame, &fop->req_frame->root->lk_owner);
+    }
+}
+
+/* Dispatch the fop to a single subvolume (round-robin start). */
+void ec_dispatch_one(ec_fop_data_t * fop)
+{
+    ec_t * ec = fop->xl->private;
+
+    ec_dispatch_start(fop);
+
+    if (ec_child_select(fop))
+    {
+        fop->expected = 1;
+        fop->first = ec->idx;
+
+        ec_dispatch_next(fop, fop->first);
+    }
+}
+
+/* After a single-subvolume dispatch failed with ENOTCONN, retry on the
+   next child. Returns nonzero if a retry was dispatched. */
+int32_t ec_dispatch_one_retry(ec_fop_data_t * fop, int32_t idx, int32_t op_ret,
+                              int32_t op_errno)
+{
+    if ((op_ret >= 0) || (op_errno != ENOTCONN))
+    {
+        return 0;
+    }
+
+    return (ec_dispatch_next(fop, idx) >= 0);
+}
+
+/* Dispatch the fop incrementally: wind to one child at a time starting at
+ * index 0, expecting answers from all remaining children eventually. The
+ * next child is wound from the callback path via ec_dispatch_next(). */
+void ec_dispatch_inc(ec_fop_data_t * fop)
+{
+    ec_dispatch_start(fop);
+
+    if (ec_child_select(fop))
+    {
+        fop->expected = ec_bits_count(fop->remaining);
+        fop->first = 0;
+
+        ec_dispatch_next(fop, 0);
+    }
+}
+
+/* Dispatch the fop to all selected children at once and expect an answer
+ * from each of them. */
+void ec_dispatch_all(ec_fop_data_t * fop)
+{
+    ec_dispatch_start(fop);
+
+    if (ec_child_select(fop))
+    {
+        fop->expected = ec_bits_count(fop->remaining);
+        fop->first = 0;
+
+        ec_dispatch_mask(fop, fop->remaining);
+    }
+}
+
+/* Dispatch the fop to exactly ec->fragments children (the minimum needed
+ * to reconstruct data), chosen round-robin starting at the ec->idx
+ * cursor. */
+void ec_dispatch_min(ec_fop_data_t * fop)
+{
+    ec_t * ec = fop->xl->private;
+    uintptr_t mask;
+    int32_t idx, count;
+
+    ec_dispatch_start(fop);
+
+    if (ec_child_select(fop))
+    {
+        fop->expected = count = ec->fragments;
+        fop->first = ec->idx;
+        idx = fop->first - 1;
+        mask = 0;
+        /* NOTE(review): assumes ec_child_select() guarantees at least
+         * 'fragments' usable children; otherwise ec_child_next() could
+         * return a negative index and the shift below would be undefined
+         * behavior — confirm. */
+        while (count-- > 0)
+        {
+            idx = ec_child_next(ec, fop, idx + 1);
+            mask |= 1ULL << idx;
+        }
+
+        ec_dispatch_mask(fop, mask);
+    }
+}
+
+/* Allocate and zero-initialize a lock descriptor of the given kind
+ * (EC_LOCK_ENTRY or EC_LOCK_INODE) for the given location.
+ *
+ * The loc must reference a resolvable inode (a non-null gfid either in the
+ * loc itself or in its inode). A private copy of the loc is stored in the
+ * lock. Returns NULL on invalid input or allocation/copy failure. */
+ec_lock_t * ec_lock_allocate(xlator_t * xl, int32_t kind, loc_t * loc)
+{
+    ec_lock_t * lock;
+
+    if ((loc->inode == NULL) ||
+        (uuid_is_null(loc->gfid) && uuid_is_null(loc->inode->gfid)))
+    {
+        gf_log(xl->name, GF_LOG_ERROR, "Trying to lock based on an invalid "
+               "inode");
+
+        return NULL;
+    }
+
+    lock = GF_MALLOC(sizeof(*lock), ec_mt_ec_lock_t);
+    if (lock != NULL)
+    {
+        memset(lock, 0, sizeof(*lock));
+
+        lock->kind = kind;
+        if (!ec_loc_from_loc(xl, &lock->loc, loc))
+        {
+            GF_FREE(lock);
+            lock = NULL;
+        }
+    }
+
+    return lock;
+}
+
+/* Free a lock descriptor: its basename, its private loc copy and the
+ * descriptor itself.
+ *
+ * NOTE(review): 'basename' lives in a union together with 'flock'
+ * (see struct _ec_lock), so this function is only safe for entry locks
+ * (or when the aliased bytes happen to be NULL) — confirm all callers. */
+void ec_lock_destroy(ec_lock_t * lock)
+{
+    GF_FREE(lock->basename);
+    loc_wipe(&lock->loc);
+
+    GF_FREE(lock);
+}
+
+/* Callback for the internal entrylk/inodelk preop lock requests.
+ *
+ * On success, records which children granted the lock (lock->mask) and
+ * restricts the parent fop's target mask to those children. On failure it
+ * only logs; the parent fop will observe the reduced mask/error through
+ * its normal completion path. */
+int32_t ec_locked(call_frame_t * frame, void * cookie, xlator_t * this,
+                  int32_t op_ret, int32_t op_errno, dict_t * xdata)
+{
+    ec_fop_data_t * fop = cookie;
+    ec_lock_t * lock = NULL;
+
+    if (op_ret >= 0)
+    {
+        lock = fop->data;
+        lock->mask = fop->good;
+        fop->parent->mask &= fop->good;
+
+        ec_trace("LOCKED", fop->parent, "lock=%p", lock);
+    }
+    else
+    {
+        gf_log(this->name, GF_LOG_WARNING, "Failed to complete preop lock");
+    }
+
+    return 0;
+}
+
+/* Acquire a write entrylk on the parent directory of 'loc' for the given
+ * entry name, as a preop for entry-modifying fops.
+ *
+ * Only top-level fops lock (child fops inherit the parent's locks). If an
+ * equivalent entry lock is already held by this fop, nothing is done. On
+ * allocation failure the fop is marked with EIO. */
+void ec_lock_entry(ec_fop_data_t * fop, loc_t * loc)
+{
+    ec_lock_t * lock = NULL;
+    char * name = NULL;
+    loc_t tmp;
+    int32_t error;
+
+    if ((fop->parent != NULL) || (fop->error != 0))
+    {
+        return;
+    }
+
+    /* Resolve the parent loc and the entry basename; 'name' is allocated
+     * and owned by this function until handed to the lock. */
+    error = ec_loc_parent(fop->xl, loc, &tmp, &name);
+    if (error != 0)
+    {
+        ec_fop_set_error(fop, error);
+
+        return;
+    }
+
+    LOCK(&fop->lock);
+
+    /* Deduplicate: skip if the same (parent, basename) entry lock is
+     * already on this fop's lock list. */
+    list_for_each_entry(lock, &fop->lock_list, list)
+    {
+        if ((lock->kind == EC_LOCK_ENTRY) &&
+            (lock->loc.inode == tmp.inode) &&
+            (strcmp(lock->basename, name) == 0))
+        {
+            ec_trace("LOCK_ENTRYLK", fop, "lock=%p, parent=%p, path=%s, "
+                     "name=%s. Lock already acquired",
+                     lock, loc->parent, loc->path, name);
+
+            lock = NULL;
+
+            goto unlock;
+        }
+    }
+
+    lock = ec_lock_allocate(fop->xl, EC_LOCK_ENTRY, &tmp);
+    if (lock != NULL)
+    {
+        lock->type = ENTRYLK_WRLCK;
+        /* Ownership of 'name' moves into the lock. */
+        lock->basename = name;
+
+        /* First lock of this fop: stamp the frame with a unique owner so
+         * the bricks can match lock/unlock pairs. */
+        if (list_empty(&fop->lock_list))
+        {
+            ec_owner_set(fop->frame, fop->frame->root);
+        }
+        list_add_tail(&lock->list, &fop->lock_list);
+    }
+    else
+    {
+        __ec_fop_set_error(fop, EIO);
+    }
+
+unlock:
+    UNLOCK(&fop->lock);
+
+    loc_wipe(&tmp);
+
+    if (lock != NULL)
+    {
+        ec_trace("LOCK_ENTRYLK", fop, "lock=%p, parent=%p, path=%s, "
+                 "basename=%s", lock, lock->loc.inode,
+                 lock->loc.path, lock->basename);
+
+        ec_entrylk(fop->frame, fop->xl, -1, EC_MINIMUM_ALL, ec_locked, lock,
+                   fop->xl->name, &lock->loc, lock->basename, ENTRYLK_LOCK,
+                   lock->type, NULL);
+    }
+    else
+    {
+        /* Duplicate or allocation failure: 'name' was not consumed. */
+        GF_FREE(name);
+    }
+}
+
+/* Acquire a full-file write inodelk on 'loc' as a preop for data/metadata
+ * modifying fops.
+ *
+ * Only top-level fops lock (child fops inherit the parent's locks). If the
+ * inode is already locked by this fop, nothing is done. On allocation
+ * failure the fop is marked with EIO. */
+void ec_lock_inode(ec_fop_data_t * fop, loc_t * loc)
+{
+    ec_lock_t * lock;
+
+    if ((fop->parent != NULL) || (fop->error != 0) || (loc->inode == NULL))
+    {
+        return;
+    }
+
+    LOCK(&fop->lock);
+
+    /* Deduplicate against locks already held by this fop. */
+    list_for_each_entry(lock, &fop->lock_list, list)
+    {
+        if ((lock->kind == EC_LOCK_INODE) && (lock->loc.inode == loc->inode))
+        {
+            UNLOCK(&fop->lock);
+
+            ec_trace("LOCK_INODELK", fop, "lock=%p, inode=%p. Lock already "
+                                          "acquired", lock, loc->inode);
+
+            return;
+        }
+    }
+
+    lock = ec_lock_allocate(fop->xl, EC_LOCK_INODE, loc);
+    if (lock != NULL)
+    {
+        /* Full-file write lock (l_start/l_len are 0 from the zeroed
+         * allocation, meaning "whole file"). */
+        lock->flock.l_type = F_WRLCK;
+        lock->flock.l_whence = SEEK_SET;
+
+        /* First lock of this fop: stamp the frame with a unique owner so
+         * the bricks can match lock/unlock pairs. */
+        if (list_empty(&fop->lock_list))
+        {
+            ec_owner_set(fop->frame, fop->frame->root);
+        }
+        list_add_tail(&lock->list, &fop->lock_list);
+    }
+    else
+    {
+        __ec_fop_set_error(fop, EIO);
+    }
+
+    UNLOCK(&fop->lock);
+
+    if (lock != NULL)
+    {
+        ec_trace("LOCK_INODELK", fop, "lock=%p, inode=%p, owner=%p", lock,
+                 lock->loc.inode, fop->frame->root);
+
+        /* Blocking lock request sent to all children. */
+        ec_inodelk(fop->frame, fop->xl, -1, EC_MINIMUM_ALL, ec_locked, lock,
+                   fop->xl->name, &lock->loc, F_SETLKW, &lock->flock, NULL);
+    }
+}
+
+/* Acquire an inode lock for an fd-based fop: build a loc from the fd and
+ * delegate to ec_lock_inode(). Marks the fop with EIO if the loc cannot be
+ * constructed. */
+void ec_lock_fd(ec_fop_data_t * fop, fd_t * fd)
+{
+    loc_t loc;
+
+    if ((fop->parent != NULL) || (fop->error != 0))
+    {
+        return;
+    }
+
+    if (ec_loc_from_fd(fop->xl, &loc, fd))
+    {
+        ec_lock_inode(fop, &loc);
+
+        /* ec_lock_inode() keeps its own copy of the loc. */
+        loc_wipe(&loc);
+    }
+    else
+    {
+        ec_fop_set_error(fop, EIO);
+    }
+}
+
+/* Callback for the internal entrylk/inodelk unlock requests issued from
+ * ec_unlock(). Failures are only logged: the lock is being released on the
+ * postop path and there is nothing more to do about it. */
+int32_t ec_unlocked(call_frame_t * frame, void * cookie, xlator_t * this,
+                    int32_t op_ret, int32_t op_errno, dict_t * xdata)
+{
+    ec_fop_data_t * fop = cookie;
+
+    if (op_ret < 0)
+    {
+        gf_log(this->name, GF_LOG_WARNING, "entry/inode unlocking failed (%s)",
+               ec_fop_name(fop->parent->id));
+    }
+    else
+    {
+        /* fop->data is only used as an identifier here, never
+         * dereferenced (the lock may already be freed by ec_unlock()). */
+        ec_trace("UNLOCKED", fop->parent, "lock=%p", fop->data);
+    }
+
+    return 0;
+}
+
+/* Release every lock held by the fop. For each lock that was actually
+ * granted somewhere (mask != 0), the corresponding unlock request is sent
+ * only to the children that granted it; the lock descriptor is then freed
+ * immediately (the ec_entrylk/ec_inodelk wrappers copy their arguments
+ * synchronously). */
+void ec_unlock(ec_fop_data_t * fop)
+{
+    ec_lock_t * lock, * item;
+
+    ec_trace("UNLOCK", fop, "");
+
+    list_for_each_entry_safe(lock, item, &fop->lock_list, list)
+    {
+        list_del(&lock->list);
+
+        if (lock->mask != 0)
+        {
+            switch (lock->kind)
+            {
+                case EC_LOCK_ENTRY:
+                    ec_trace("UNLOCK_ENTRYLK", fop, "lock=%p, parent=%p, "
+                                                    "path=%s, basename=%s",
+                             lock, lock->loc.inode, lock->loc.path,
+                             lock->basename);
+
+                    ec_entrylk(fop->frame, fop->xl, lock->mask, EC_MINIMUM_ALL,
+                               ec_unlocked, lock, fop->xl->name, &lock->loc,
+                               lock->basename, ENTRYLK_UNLOCK, lock->type,
+                               NULL);
+
+                    break;
+
+                case EC_LOCK_INODE:
+                    lock->flock.l_type = F_UNLCK;
+                    ec_trace("UNLOCK_INODELK", fop, "lock=%p, inode=%p", lock,
+                             lock->loc.inode);
+
+                    ec_inodelk(fop->frame, fop->xl, lock->mask, EC_MINIMUM_ALL,
+                               ec_unlocked, lock, fop->xl->name, &lock->loc,
+                               F_SETLK, &lock->flock, NULL);
+
+                    break;
+
+                default:
+                    gf_log(fop->xl->name, GF_LOG_ERROR, "Invalid lock type");
+            }
+        }
+
+        /* NOTE(review): for EC_LOCK_ENTRY locks, 'lock->basename' is NOT
+         * freed here (ec_lock_destroy() is not used), which looks like a
+         * memory leak of the name allocated in ec_lock_entry() — confirm
+         * and fix if so. */
+        loc_wipe(&lock->loc);
+
+        GF_FREE(lock);
+    }
+}
+
+/* Callback for the internal lookup issued by ec_get_size_version().
+ *
+ * On success, restricts the parent fop's target mask to the children that
+ * answered consistently and caches the file size as both pre- and post-op
+ * size. On failure, propagates the error to the internal lookup fop. */
+int32_t ec_get_size_version_set(call_frame_t * frame, void * cookie,
+                                xlator_t * this, int32_t op_ret,
+                                int32_t op_errno, inode_t * inode,
+                                struct iatt * buf, dict_t * xdata,
+                                struct iatt * postparent)
+{
+    ec_fop_data_t * fop = cookie;
+
+    if (op_ret >= 0)
+    {
+        fop->parent->mask &= fop->good;
+        fop->parent->pre_size = fop->parent->post_size = buf->ia_size;
+    }
+    else
+    {
+        gf_log(this->name, GF_LOG_WARNING, "Failed to get size and version "
+                                           "(error %d)", op_errno);
+        ec_fop_set_error(fop, op_errno);
+    }
+
+    return 0;
+}
+
+/* Fetch the current size and version xattrs of the target file/inode via
+ * an internal lookup, run with root credentials so the trusted.ec.*
+ * xattrs are always readable.
+ *
+ * Child fops simply inherit the sizes already obtained by their parent. */
+void ec_get_size_version(ec_fop_data_t * fop)
+{
+    loc_t loc;
+    dict_t * xdata;
+    uid_t uid;
+    gid_t gid;
+    int32_t error = ENOMEM;
+
+    if (fop->parent != NULL)
+    {
+        fop->pre_size = fop->parent->pre_size;
+        fop->post_size = fop->parent->post_size;
+
+        return;
+    }
+
+    memset(&loc, 0, sizeof(loc));
+
+    /* Request the version and size xattrs through the lookup's xdata. */
+    xdata = dict_new();
+    if (xdata == NULL)
+    {
+        goto out;
+    }
+    if ((dict_set_uint64(xdata, EC_XATTR_VERSION, 0) != 0) ||
+        (dict_set_uint64(xdata, EC_XATTR_SIZE, 0) != 0))
+    {
+        goto out;
+    }
+
+    /* Temporarily become root for the internal lookup. */
+    uid = fop->frame->root->uid;
+    gid = fop->frame->root->gid;
+
+    fop->frame->root->uid = 0;
+    fop->frame->root->gid = 0;
+
+    error = EIO;
+
+    if (fop->fd == NULL)
+    {
+        if (!ec_loc_from_loc(fop->xl, &loc, &fop->loc[0]))
+        {
+            /* NOTE(review): uid/gid have already been zeroed and are not
+             * restored on this error path; the fop fails with EIO anyway,
+             * but confirm the frame is not reused afterwards. */
+            goto out;
+        }
+        /* Without a parent gfid a name-based lookup cannot be resolved;
+         * strip the name/path so a gfid-based (nameless) lookup is done. */
+        if (uuid_is_null(loc.pargfid))
+        {
+            if (loc.parent != NULL)
+            {
+                inode_unref(loc.parent);
+                loc.parent = NULL;
+            }
+            GF_FREE((char *)loc.path);
+            loc.path = NULL;
+            loc.name = NULL;
+        }
+    }
+    else if (!ec_loc_from_fd(fop->xl, &loc, fop->fd))
+    {
+        goto out;
+    }
+
+    ec_lookup(fop->frame, fop->xl, fop->mask, EC_MINIMUM_MIN,
+              ec_get_size_version_set, NULL, &loc, xdata);
+
+    fop->frame->root->uid = uid;
+    fop->frame->root->gid = gid;
+
+    error = 0;
+
+out:
+    loc_wipe(&loc);
+
+    if (xdata != NULL)
+    {
+        dict_unref(xdata);
+    }
+
+    /* NOTE(review): called with error == 0 on success — presumably
+     * ec_fop_set_error() ignores 0; verify. */
+    ec_fop_set_error(fop, error);
+}
+
+/* Callback for the xattrop issued by ec_update_size_version(). On success,
+ * restricts the parent fop's mask to the children that applied the update;
+ * on failure it only logs (the postop update is best-effort here). */
+int32_t ec_update_size_version_done(call_frame_t * frame, void * cookie,
+                                    xlator_t * this, int32_t op_ret,
+                                    int32_t op_errno, dict_t * xattr,
+                                    dict_t * xdata)
+{
+    ec_fop_data_t * fop = cookie;
+
+    if (op_ret < 0)
+    {
+        gf_log(fop->xl->name, GF_LOG_ERROR, "Failed to update version and "
+                                            "size (error %d)", op_errno);
+    }
+    else
+    {
+        fop->parent->mask &= fop->good;
+    }
+
+    return 0;
+}
+
+/* Postop: atomically bump the version xattr (and adjust the size xattr by
+ * the delta between post and pre size, if any) on all good children using
+ * an ADD_ARRAY64 xattrop, run with root credentials.
+ *
+ * Child fops only propagate their post_size to the parent; the real
+ * update is done once by the top-level fop. */
+void ec_update_size_version(ec_fop_data_t * fop)
+{
+    dict_t * dict;
+    size_t size;
+    uid_t uid;
+    gid_t gid;
+
+    if (fop->parent != NULL)
+    {
+        fop->parent->post_size = fop->post_size;
+
+        return;
+    }
+
+    dict = dict_new();
+    if (dict == NULL)
+    {
+        goto out;
+    }
+
+    /* Version always advances by 1 on a successful modification. */
+    if (ec_dict_set_number(dict, EC_XATTR_VERSION, 1) != 0)
+    {
+        goto out;
+    }
+    /* Only send a size delta if the operation changed the file size.
+     * The subtraction is done on an unsigned size_t: a shrink produces a
+     * wrapped value that the brick-side 64-bit addition interprets as a
+     * negative delta. */
+    size = fop->post_size;
+    if (fop->pre_size != size)
+    {
+        size -= fop->pre_size;
+        if (ec_dict_set_number(dict, EC_XATTR_SIZE, size) != 0)
+        {
+            goto out;
+        }
+    }
+
+    /* Temporarily become root so the trusted.ec.* xattrs can be written. */
+    uid = fop->frame->root->uid;
+    gid = fop->frame->root->gid;
+
+    fop->frame->root->uid = 0;
+    fop->frame->root->gid = 0;
+
+    if (fop->fd == NULL)
+    {
+        ec_xattrop(fop->frame, fop->xl, fop->mask, EC_MINIMUM_MIN,
+                   ec_update_size_version_done, NULL, &fop->loc[0],
+                   GF_XATTROP_ADD_ARRAY64, dict, NULL);
+    }
+    else
+    {
+        ec_fxattrop(fop->frame, fop->xl, fop->mask, EC_MINIMUM_MIN,
+                    ec_update_size_version_done, NULL, fop->fd,
+                    GF_XATTROP_ADD_ARRAY64, dict, NULL);
+    }
+
+    fop->frame->root->uid = uid;
+    fop->frame->root->gid = gid;
+
+    dict_unref(dict);
+
+    return;
+
+out:
+    if (dict != NULL)
+    {
+        dict_unref(dict);
+    }
+
+    ec_fop_set_error(fop, EIO);
+
+    gf_log(fop->xl->name, GF_LOG_ERROR, "Unable to update version and size");
+}
+
+/* Core of the fop state machine. Repeatedly invokes the fop's handler to
+ * advance its state until either the fop reaches EC_STATE_END (released)
+ * or ec_check_complete() reports outstanding async work (negative return),
+ * in which case the machine is resumed later from the completion path with
+ * __ec_manager as the continuation. An error flips the state to its
+ * negative counterpart so the handler can run its rollback branch. */
+void __ec_manager(ec_fop_data_t * fop, int32_t error)
+{
+    do
+    {
+        ec_trace("MANAGER", fop, "error=%d", error);
+
+        if (fop->state == EC_STATE_END)
+        {
+            ec_fop_data_release(fop);
+
+            break;
+        }
+
+        if (error != 0)
+        {
+            fop->error = error;
+            /* Negative states are the error/rollback variants of the
+             * corresponding positive states. */
+            fop->state = -fop->state;
+        }
+
+        fop->state = fop->handler(fop, fop->state);
+
+        error = ec_check_complete(fop, __ec_manager);
+    } while (error >= 0);
+}
+
+/* Entry point of the fop state machine: moves a freshly created fop from
+ * EC_STATE_START to EC_STATE_INIT and runs the machine. Must only be
+ * called on a fop with no pending jobs, winds or errors. */
+void ec_manager(ec_fop_data_t * fop, int32_t error)
+{
+    GF_ASSERT(fop->jobs == 0);
+    GF_ASSERT(fop->winds == 0);
+    GF_ASSERT(fop->error == 0);
+
+    if (fop->state == EC_STATE_START)
+    {
+        fop->state = EC_STATE_INIT;
+    }
+
+    __ec_manager(fop, error);
+}
diff --git a/xlators/cluster/ec/src/ec-common.h b/xlators/cluster/ec/src/ec-common.h
new file mode 100644
index 00000000000..83f3ba9637e
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-common.h
@@ -0,0 +1,105 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __EC_COMMON_H__
+#define __EC_COMMON_H__
+
+#include "xlator.h"
+
+#include "ec-data.h"
+
+/* Flags describing which loc/fd references of a fop need refreshing from
+ * the accepted answer. */
+#define EC_FLAG_UPDATE_LOC_PARENT 0x0001
+#define EC_FLAG_UPDATE_LOC_INODE 0x0002
+#define EC_FLAG_UPDATE_FD 0x0004
+#define EC_FLAG_UPDATE_FD_INODE 0x0008
+
+/* The fop must wait for all winds to return before completing. */
+#define EC_FLAG_WAITING_WINDS 0x0010
+
+/* Special 'minimum' values for dispatching: one answer, the minimum
+ * fragment count, or answers from all targeted children. */
+#define EC_MINIMUM_ONE -1
+#define EC_MINIMUM_MIN -2
+#define EC_MINIMUM_ALL -3
+
+/* Lock kinds stored in ec_lock_t.kind. */
+#define EC_LOCK_ENTRY 0
+#define EC_LOCK_INODE 1
+
+/* Generic fop state machine states. Negative values of these states are
+ * the corresponding error/rollback states (see __ec_manager()).
+ * NOTE(review): START and END intentionally share the value 0 — the
+ * machine begins and terminates at state 0; confirm no handler needs to
+ * distinguish them. */
+#define EC_STATE_START 0
+#define EC_STATE_END 0
+#define EC_STATE_INIT 1
+#define EC_STATE_LOCK 2
+#define EC_STATE_GET_SIZE_AND_VERSION 3
+#define EC_STATE_DISPATCH 4
+#define EC_STATE_PREPARE_ANSWER 5
+#define EC_STATE_REPORT 6
+#define EC_STATE_UPDATE_SIZE_AND_VERSION 7
+#define EC_STATE_UNLOCK 8
+
+/* Extra states used by write fops. */
+#define EC_STATE_WRITE_START 100
+
+/* Extra states used by the self-heal state machine. */
+#define EC_STATE_HEAL_ENTRY_LOOKUP 200
+#define EC_STATE_HEAL_ENTRY_PREPARE 201
+#define EC_STATE_HEAL_PRE_INODELK_LOCK 202
+#define EC_STATE_HEAL_PRE_INODE_LOOKUP 203
+#define EC_STATE_HEAL_XATTRIBUTES_REMOVE 204
+#define EC_STATE_HEAL_XATTRIBUTES_SET 205
+#define EC_STATE_HEAL_ATTRIBUTES 206
+#define EC_STATE_HEAL_OPEN 207
+#define EC_STATE_HEAL_REOPEN_FD 208
+#define EC_STATE_HEAL_UNLOCK 209
+#define EC_STATE_HEAL_DATA_LOCK 210
+#define EC_STATE_HEAL_DATA_COPY 211
+#define EC_STATE_HEAL_DATA_UNLOCK 212
+#define EC_STATE_HEAL_POST_INODELK_LOCK 213
+#define EC_STATE_HEAL_POST_INODE_LOOKUP 214
+#define EC_STATE_HEAL_SETATTR 215
+#define EC_STATE_HEAL_POST_INODELK_UNLOCK 216
+#define EC_STATE_HEAL_DISPATCH 217
+
+int32_t ec_dispatch_one_retry(ec_fop_data_t * fop, int32_t idx, int32_t op_ret,
+ int32_t op_errno);
+int32_t ec_dispatch_next(ec_fop_data_t * fop, int32_t idx);
+
+void ec_complete(ec_fop_data_t * fop);
+
+void ec_update_bad(ec_fop_data_t * fop, uintptr_t good);
+
+void ec_fop_set_error(ec_fop_data_t * fop, int32_t error);
+
+void ec_lock_inode(ec_fop_data_t * fop, loc_t * loc);
+void ec_lock_entry(ec_fop_data_t * fop, loc_t * loc);
+void ec_lock_fd(ec_fop_data_t * fop, fd_t * fd);
+
+void ec_unlock(ec_fop_data_t * fop);
+
+void ec_get_size_version(ec_fop_data_t * fop);
+void ec_update_size_version(ec_fop_data_t * fop);
+
+void ec_dispatch_all(ec_fop_data_t * fop);
+void ec_dispatch_inc(ec_fop_data_t * fop);
+void ec_dispatch_min(ec_fop_data_t * fop);
+void ec_dispatch_one(ec_fop_data_t * fop);
+
+void ec_wait_winds(ec_fop_data_t * fop);
+
+void ec_resume_parent(ec_fop_data_t * fop, int32_t error);
+void ec_report(ec_fop_data_t * fop, int32_t error);
+
+void ec_manager(ec_fop_data_t * fop, int32_t error);
+
+#endif /* __EC_COMMON_H__ */
diff --git a/xlators/cluster/ec/src/ec-data.c b/xlators/cluster/ec/src/ec-data.c
new file mode 100644
index 00000000000..0e72fbbd3b6
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-data.c
@@ -0,0 +1,261 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include "ec-mem-types.h"
+#include "ec-helpers.h"
+#include "ec-common.h"
+#include "ec-data.h"
+
+/* Allocate and initialize an answer descriptor for 'fop' coming from the
+ * child at index 'idx', and link it into fop->answer_list.
+ *
+ * Sanity-checks that the answer really belongs to the request (same
+ * xlator, same frame, same fop id). Returns NULL on mismatch or on
+ * allocation failure. */
+ec_cbk_data_t * ec_cbk_data_allocate(call_frame_t * frame, xlator_t * this,
+                                     ec_fop_data_t * fop, int32_t id,
+                                     int32_t idx, int32_t op_ret,
+                                     int32_t op_errno)
+{
+    ec_cbk_data_t * cbk;
+    ec_t * ec = this->private;
+
+    if (fop->xl != this)
+    {
+        gf_log(this->name, GF_LOG_ERROR, "Mismatching xlators between request "
+                                         "and answer (req=%s, ans=%s).",
+               fop->xl->name, this->name);
+
+        return NULL;
+    }
+    if (fop->frame != frame)
+    {
+        gf_log(this->name, GF_LOG_ERROR, "Mismatching frames between request "
+                                         "and answer (req=%p, ans=%p).",
+               fop->frame, frame);
+
+        return NULL;
+    }
+    if (fop->id != id)
+    {
+        gf_log(this->name, GF_LOG_ERROR, "Mismatching fops between request "
+                                         "and answer (req=%d, ans=%d).",
+               fop->id, id);
+
+        return NULL;
+    }
+
+    cbk = mem_get0(ec->cbk_pool);
+    if (cbk == NULL)
+    {
+        gf_log(this->name, GF_LOG_ERROR, "Failed to allocate memory for an "
+                                         "answer.");
+
+        /* Fix: the original code fell through on allocation failure and
+         * dereferenced the NULL pointer below. */
+        return NULL;
+    }
+
+    cbk->fop = fop;
+    cbk->idx = idx;
+    cbk->mask = 1ULL << idx;
+    cbk->count = 1;
+    cbk->op_ret = op_ret;
+    cbk->op_errno = op_errno;
+
+    LOCK(&fop->lock);
+
+    list_add_tail(&cbk->answer_list, &fop->answer_list);
+
+    UNLOCK(&fop->lock);
+
+    return cbk;
+}
+
+/* Release all references held by an answer descriptor (dicts, inode, fd,
+ * io buffers, iovec array) and return it to its memory pool. */
+void ec_cbk_data_destroy(ec_cbk_data_t * cbk)
+{
+    if (cbk->xdata != NULL)
+    {
+        dict_unref(cbk->xdata);
+    }
+    if (cbk->dict != NULL)
+    {
+        dict_unref(cbk->dict);
+    }
+    if (cbk->inode != NULL)
+    {
+        inode_unref(cbk->inode);
+    }
+    if (cbk->fd != NULL)
+    {
+        fd_unref(cbk->fd);
+    }
+    if (cbk->buffers != NULL)
+    {
+        iobref_unref(cbk->buffers);
+    }
+    /* GF_FREE(NULL) is a no-op, so 'vector' needs no guard. */
+    GF_FREE(cbk->vector);
+
+    mem_put(cbk);
+}
+
+/* Allocate and initialize a fop descriptor.
+ *
+ * 'frame' is the caller's frame (may be NULL for internally generated
+ * fops). A private frame is always created so postop work can outlive the
+ * caller's frame. If the caller's frame already carries an ec fop as its
+ * local, the new fop becomes its child and bumps the parent's job/ref
+ * counters. Returns NULL on allocation failure. */
+ec_fop_data_t * ec_fop_data_allocate(call_frame_t * frame, xlator_t * this,
+                                     int32_t id, uint32_t flags,
+                                     uintptr_t target, int32_t minimum,
+                                     ec_wind_f wind, ec_handler_f handler,
+                                     ec_cbk_t cbks, void * data)
+{
+    ec_fop_data_t * fop, * parent;
+    ec_t * ec = this->private;
+
+    fop = mem_get0(ec->fop_pool);
+    if (fop == NULL)
+    {
+        gf_log(this->name, GF_LOG_ERROR, "Failed to allocate memory for a "
+                                         "request.");
+
+        return NULL;
+    }
+
+    fop->xl = this;
+    fop->req_frame = frame;
+
+    /* fops need a private frame to be able to execute some postop operations
+     * even if the original fop has completed and reported back to the upper
+     * xlator and it has destroyed the base frame.
+     *
+     * TODO: minimize usage of private frames. Reuse req_frame as much as
+     * possible.
+     */
+    if (frame != NULL)
+    {
+        fop->frame = copy_frame(frame);
+    }
+    else
+    {
+        fop->frame = create_frame(this, this->ctx->pool);
+    }
+    if (fop->frame == NULL)
+    {
+        gf_log(this->name, GF_LOG_ERROR, "Failed to create a private frame "
+                                         "for a request");
+
+        mem_put(fop);
+
+        return NULL;
+    }
+    fop->id = id;
+    /* The creator owns the initial reference. */
+    fop->refs = 1;
+
+    fop->flags = flags;
+    fop->minimum = minimum;
+    fop->mask = target;
+
+    INIT_LIST_HEAD(&fop->lock_list);
+    INIT_LIST_HEAD(&fop->cbk_list);
+    INIT_LIST_HEAD(&fop->answer_list);
+
+    fop->wind = wind;
+    fop->handler = handler;
+    fop->cbks = cbks;
+    fop->data = data;
+
+    LOCK_INIT(&fop->lock);
+
+    fop->frame->local = fop;
+
+    if (frame != NULL)
+    {
+        /* If the caller's frame carries an ec fop, this fop becomes its
+         * child: the parent gains a pending job and a reference. */
+        parent = frame->local;
+        if (parent != NULL)
+        {
+            LOCK(&parent->lock);
+
+            parent->jobs++;
+            parent->refs++;
+
+            UNLOCK(&parent->lock);
+        }
+
+        fop->parent = parent;
+    }
+
+    return fop;
+}
+
+/* Take an additional reference on the fop (paired with
+ * ec_fop_data_release()). */
+void ec_fop_data_acquire(ec_fop_data_t * fop)
+{
+    LOCK(&fop->lock);
+
+    ec_trace("ACQUIRE", fop, "");
+
+    fop->refs++;
+
+    UNLOCK(&fop->lock);
+}
+
+/* Drop one reference on the fop. When the last reference goes away, the
+ * private frame, held dicts/inode/fd/buffers, strings and locs are all
+ * released, the parent fop (if any) is resumed with this fop's error, the
+ * collected answers are destroyed and the descriptor returns to its pool. */
+void ec_fop_data_release(ec_fop_data_t * fop)
+{
+    ec_cbk_data_t * cbk, * tmp;
+    int32_t refs;
+
+    LOCK(&fop->lock);
+
+    ec_trace("RELEASE", fop, "");
+
+    refs = --fop->refs;
+
+    UNLOCK(&fop->lock);
+
+    if (refs == 0)
+    {
+        /* Detach before destroying the frame so nothing can reach the fop
+         * through frame->local anymore. */
+        fop->frame->local = NULL;
+        STACK_DESTROY(fop->frame->root);
+
+        LOCK_DESTROY(&fop->lock);
+
+        if (fop->xdata != NULL)
+        {
+            dict_unref(fop->xdata);
+        }
+        if (fop->dict != NULL)
+        {
+            dict_unref(fop->dict);
+        }
+        if (fop->inode != NULL)
+        {
+            inode_unref(fop->inode);
+        }
+        if (fop->fd != NULL)
+        {
+            fd_unref(fop->fd);
+        }
+        if (fop->buffers != NULL)
+        {
+            iobref_unref(fop->buffers);
+        }
+        GF_FREE(fop->vector);
+        GF_FREE(fop->str[0]);
+        GF_FREE(fop->str[1]);
+        loc_wipe(&fop->loc[0]);
+        loc_wipe(&fop->loc[1]);
+
+        /* Wake up the parent fop (decrements its job counter). */
+        ec_resume_parent(fop, fop->error);
+
+        list_for_each_entry_safe(cbk, tmp, &fop->answer_list, answer_list)
+        {
+            list_del_init(&cbk->answer_list);
+
+            ec_cbk_data_destroy(cbk);
+        }
+
+        mem_put(fop);
+    }
+}
diff --git a/xlators/cluster/ec/src/ec-data.h b/xlators/cluster/ec/src/ec-data.h
new file mode 100644
index 00000000000..e83b6ad74eb
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-data.h
@@ -0,0 +1,260 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __EC_DATA_H__
+#define __EC_DATA_H__
+
+#include "xlator.h"
+
+#include "ec.h"
+
+struct _ec_fd;
+typedef struct _ec_fd ec_fd_t;
+
+struct _ec_inode;
+typedef struct _ec_inode ec_inode_t;
+
+union _ec_cbk;
+typedef union _ec_cbk ec_cbk_t;
+
+struct _ec_lock;
+typedef struct _ec_lock ec_lock_t;
+
+struct _ec_fop_data;
+typedef struct _ec_fop_data ec_fop_data_t;
+
+struct _ec_cbk_data;
+typedef struct _ec_cbk_data ec_cbk_data_t;
+
+struct _ec_heal;
+typedef struct _ec_heal ec_heal_t;
+
+typedef void (* ec_wind_f)(ec_t *, ec_fop_data_t *, int32_t);
+typedef int32_t (* ec_handler_f)(ec_fop_data_t *, int32_t);
+typedef void (* ec_resume_f)(ec_fop_data_t *, int32_t);
+
+/* Per-fd context: which children have a bad copy, the fd's location, the
+ * mask of children where the fd is actually open, and the open flags. */
+struct _ec_fd
+{
+    uintptr_t bad;
+    loc_t loc;
+    uintptr_t open;
+    int32_t flags;
+};
+
+/* Per-inode context: mask of children with a bad copy and the in-progress
+ * heal for this inode, if any. */
+struct _ec_inode
+{
+    uintptr_t bad;
+    ec_heal_t * heal;
+};
+
+typedef int32_t (* fop_heal_cbk_t)(call_frame_t *, void * cookie, xlator_t *,
+ int32_t, int32_t, uintptr_t, uintptr_t,
+ uintptr_t, dict_t *);
+typedef int32_t (* fop_fheal_cbk_t)(call_frame_t *, void * cookie, xlator_t *,
+ int32_t, int32_t, uintptr_t, uintptr_t,
+ uintptr_t, dict_t *);
+
+
+union _ec_cbk
+{
+ fop_access_cbk_t access;
+ fop_create_cbk_t create;
+ fop_discard_cbk_t discard;
+ fop_entrylk_cbk_t entrylk;
+ fop_fentrylk_cbk_t fentrylk;
+ fop_fallocate_cbk_t fallocate;
+ fop_flush_cbk_t flush;
+ fop_fsync_cbk_t fsync;
+ fop_fsyncdir_cbk_t fsyncdir;
+ fop_getxattr_cbk_t getxattr;
+ fop_fgetxattr_cbk_t fgetxattr;
+ fop_heal_cbk_t heal;
+ fop_fheal_cbk_t fheal;
+ fop_inodelk_cbk_t inodelk;
+ fop_finodelk_cbk_t finodelk;
+ fop_link_cbk_t link;
+ fop_lk_cbk_t lk;
+ fop_lookup_cbk_t lookup;
+ fop_mkdir_cbk_t mkdir;
+ fop_mknod_cbk_t mknod;
+ fop_open_cbk_t open;
+ fop_opendir_cbk_t opendir;
+ fop_readdir_cbk_t readdir;
+ fop_readdirp_cbk_t readdirp;
+ fop_readlink_cbk_t readlink;
+ fop_readv_cbk_t readv;
+ fop_removexattr_cbk_t removexattr;
+ fop_fremovexattr_cbk_t fremovexattr;
+ fop_rename_cbk_t rename;
+ fop_rmdir_cbk_t rmdir;
+ fop_setattr_cbk_t setattr;
+ fop_fsetattr_cbk_t fsetattr;
+ fop_setxattr_cbk_t setxattr;
+ fop_fsetxattr_cbk_t fsetxattr;
+ fop_stat_cbk_t stat;
+ fop_fstat_cbk_t fstat;
+ fop_statfs_cbk_t statfs;
+ fop_symlink_cbk_t symlink;
+ fop_truncate_cbk_t truncate;
+ fop_ftruncate_cbk_t ftruncate;
+ fop_unlink_cbk_t unlink;
+ fop_writev_cbk_t writev;
+ fop_xattrop_cbk_t xattrop;
+ fop_fxattrop_cbk_t fxattrop;
+ fop_zerofill_cbk_t zerofill;
+};
+
+/* A lock held by a fop (entry in ec_fop_data_t.lock_list).
+ *
+ * 'mask' records which children granted the lock; 'kind' is EC_LOCK_ENTRY
+ * or EC_LOCK_INODE. The anonymous union means 'type'/'basename' (entry
+ * locks) share storage with 'flock' (inode locks): 'basename' is only a
+ * valid pointer when kind == EC_LOCK_ENTRY. */
+struct _ec_lock
+{
+    struct list_head list;
+    uintptr_t mask;
+    int32_t kind;
+    loc_t loc;
+    union
+    {
+        struct
+        {
+            entrylk_type type;
+            char * basename;
+        };
+        struct gf_flock flock;
+    };
+};
+
+struct _ec_fop_data
+{
+ int32_t id;
+ int32_t refs;
+ int32_t state;
+ int32_t minimum;
+ int32_t expected;
+ int32_t winds;
+ int32_t jobs;
+ int32_t error;
+ ec_fop_data_t * parent;
+ xlator_t * xl;
+ call_frame_t * req_frame; // frame of the calling xlator
+ call_frame_t * frame; // frame used by this fop
+ struct list_head lock_list; // list locks held by this fop
+ struct list_head cbk_list; // sorted list of groups of answers
+ struct list_head answer_list; // list of answers
+ ec_cbk_data_t * answer; // accepted answer
+ size_t pre_size;
+ size_t post_size;
+ gf_lock_t lock;
+
+ uint32_t flags;
+ uint32_t first;
+ uintptr_t mask;
+ uintptr_t remaining;
+ uintptr_t good;
+ uintptr_t bad;
+
+ ec_wind_f wind;
+ ec_handler_f handler;
+ ec_resume_f resume;
+ ec_cbk_t cbks;
+ void * data;
+
+ size_t user_size;
+ size_t head;
+
+ dict_t * xdata;
+ dict_t * dict;
+ int32_t int32;
+ uint32_t uint32;
+ size_t size;
+ off_t offset;
+ mode_t mode[2];
+ entrylk_cmd entrylk_cmd;
+ entrylk_type entrylk_type;
+ gf_xattrop_flags_t xattrop_flags;
+ dev_t dev;
+ inode_t * inode;
+ fd_t * fd;
+ struct iatt iatt;
+ char * str[2];
+ loc_t loc[2];
+ struct gf_flock flock;
+ struct iovec * vector;
+ struct iobref * buffers;
+};
+
+struct _ec_cbk_data
+{
+ struct list_head list; // item in the sorted list of groups
+ struct list_head answer_list; // item in the list of answers
+ ec_fop_data_t * fop;
+ ec_cbk_data_t * next; // next answer in the same group
+ int32_t idx;
+ int32_t op_ret;
+ int32_t op_errno;
+ int32_t count;
+ uintptr_t mask;
+
+ dict_t * xdata;
+ dict_t * dict;
+ int32_t int32;
+ uintptr_t uintptr[3];
+ size_t size;
+ uint64_t version;
+ inode_t * inode;
+ fd_t * fd;
+ struct statvfs statvfs;
+ struct iatt iatt[5];
+ struct gf_flock flock;
+ struct iovec * vector;
+ struct iobref * buffers;
+};
+
+struct _ec_heal
+{
+ gf_lock_t lock;
+ xlator_t * xl;
+ ec_fop_data_t * fop;
+ ec_fop_data_t * lookup;
+ loc_t loc;
+ struct iatt iatt;
+ char * symlink;
+ fd_t * fd;
+ int32_t done;
+ uintptr_t available;
+ uintptr_t good;
+ uintptr_t bad;
+ uintptr_t open;
+ off_t offset;
+ size_t size;
+ uint64_t version;
+ size_t raw_size;
+};
+
+ec_cbk_data_t * ec_cbk_data_allocate(call_frame_t * frame, xlator_t * this,
+ ec_fop_data_t * fop, int32_t id,
+ int32_t idx, int32_t op_ret,
+ int32_t op_errno);
+ec_fop_data_t * ec_fop_data_allocate(call_frame_t * frame, xlator_t * this,
+ int32_t id, uint32_t flags,
+ uintptr_t target, int32_t minimum,
+ ec_wind_f wind, ec_handler_f handler,
+ ec_cbk_t cbks, void * data);
+void ec_fop_data_acquire(ec_fop_data_t * fop);
+void ec_fop_data_release(ec_fop_data_t * fop);
+
+#endif /* __EC_DATA_H__ */
diff --git a/xlators/cluster/ec/src/ec-dir-read.c b/xlators/cluster/ec/src/ec-dir-read.c
new file mode 100644
index 00000000000..1e7abc30d91
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-dir-read.c
@@ -0,0 +1,571 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include "xlator.h"
+#include "defaults.h"
+
+#include "ec-helpers.h"
+#include "ec-common.h"
+#include "ec-combine.h"
+#include "ec-method.h"
+#include "ec-fops.h"
+
+/* FOP: opendir */
+
+int32_t ec_combine_opendir(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (dst->fd != src->fd)
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching fd in answers "
+ "of 'GF_FOP_OPENDIR': %p <-> %p",
+ dst->fd, src->fd);
+
+ return 0;
+ }
+
+ return 1;
+}
+
/* Callback invoked when one subvolume answers an opendir request. The
 * subvolume index travels in the cookie. The answer is wrapped into an
 * ec_cbk_data_t and combined with compatible answers from other subvolumes. */
int32_t ec_opendir_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                       int32_t op_ret, int32_t op_errno, fd_t * fd,
                       dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_OPENDIR, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (op_ret >= 0)
        {
            /* Keep a reference to the returned fd for later delivery. */
            if (fd != NULL)
            {
                cbk->fd = fd_ref(fd);
                if (cbk->fd == NULL)
                {
                    gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                     "file descriptor.");

                    goto out;
                }
            }
        }
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        /* Group this answer with matching ones from other subvolumes. */
        ec_combine(cbk, ec_combine_opendir);
    }

out:
    /* Always account for the received answer, even on validation failure. */
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
+void ec_wind_opendir(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+ ec_trace("WIND", fop, "idx=%d", idx);
+
+ STACK_WIND_COOKIE(fop->frame, ec_opendir_cbk, (void *)(uintptr_t)idx,
+ ec->xl_list[idx], ec->xl_list[idx]->fops->opendir,
+ &fop->loc[0], fop->fd, fop->xdata);
+}
+
/* State machine for the opendir fop. Returns the next state to execute;
 * negative states indicate that an error was detected while in that state. */
int32_t ec_manager_opendir(ec_fop_data_t * fop, int32_t state)
{
    ec_cbk_data_t * cbk;

    switch (state)
    {
        case EC_STATE_INIT:
        case EC_STATE_DISPATCH:
            /* Send the request to all targeted subvolumes. */
            ec_dispatch_all(fop);

            return EC_STATE_PREPARE_ANSWER;

        case EC_STATE_PREPARE_ANSWER:
            cbk = fop->answer;
            if (cbk != NULL)
            {
                /* Merge the xdata dicts of the grouped answers; on failure
                 * degrade a successful answer to EIO. */
                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
                {
                    if (cbk->op_ret >= 0)
                    {
                        cbk->op_ret = -1;
                        cbk->op_errno = EIO;
                    }
                }
                if (cbk->op_ret < 0)
                {
                    ec_fop_set_error(fop, cbk->op_errno);
                }
            }
            else
            {
                /* No usable answer from any subvolume. */
                ec_fop_set_error(fop, EIO);
            }

            return EC_STATE_REPORT;

        case EC_STATE_REPORT:
            cbk = fop->answer;

            GF_ASSERT(cbk != NULL);

            if (fop->cbks.opendir != NULL)
            {
                fop->cbks.opendir(fop->req_frame, fop, fop->xl, cbk->op_ret,
                                  cbk->op_errno, cbk->fd, cbk->xdata);
            }

            return EC_STATE_END;

        case -EC_STATE_DISPATCH:
        case -EC_STATE_PREPARE_ANSWER:
        case -EC_STATE_REPORT:
            /* Error path: report the failure to the caller. */
            GF_ASSERT(fop->error != 0);

            if (fop->cbks.opendir != NULL)
            {
                fop->cbks.opendir(fop->req_frame, fop, fop->xl, -1, fop->error,
                                  NULL, NULL);
            }

            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
/* Entry point for the opendir fop. Builds an ec_fop_data_t holding copies /
 * references of all arguments and starts its state machine. 'target' is the
 * bitmask of subvolumes to use, 'minimum' the minimum number of compatible
 * answers required. If the fop cannot even be allocated, the caller's
 * callback is invoked directly with EIO. */
void ec_opendir(call_frame_t * frame, xlator_t * this, uintptr_t target,
                int32_t minimum, fop_opendir_cbk_t func, void * data,
                loc_t * loc, fd_t * fd, dict_t * xdata)
{
    ec_cbk_t callback = { .opendir = func };
    ec_fop_data_t * fop = NULL;
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(OPENDIR) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_OPENDIR, EC_FLAG_UPDATE_FD,
                               target, minimum, ec_wind_opendir,
                               ec_manager_opendir, callback, data);
    if (fop == NULL)
    {
        goto out;
    }

    if (loc != NULL)
    {
        if (loc_copy(&fop->loc[0], loc) != 0)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");

            goto out;
        }
    }
    if (fd != NULL)
    {
        fop->fd = fd_ref(fd);
        if (fop->fd == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "file descriptor.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        /* The manager runs the fop (or its error path when error != 0). */
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL, NULL);
    }
}
+
+/* FOP: readdir */
+
+void ec_adjust_readdir(ec_t * ec, int32_t idx, gf_dirent_t * entries)
+{
+ gf_dirent_t * entry;
+
+ list_for_each_entry(entry, &entries->list, list)
+ {
+ entry->d_off = ec_itransform(ec, idx, entry->d_off);
+
+ if (entry->d_stat.ia_type == IA_IFREG)
+ {
+ if ((entry->dict == NULL) ||
+ (ec_dict_del_number(entry->dict, EC_XATTR_SIZE,
+ &entry->d_stat.ia_size) != 0))
+ {
+ gf_log(ec->xl->name, GF_LOG_WARNING, "Unable to get exact "
+ "file size.");
+
+ entry->d_stat.ia_size *= ec->fragments;
+ }
+
+ ec_iatt_rebuild(ec, &entry->d_stat, 1, 1);
+ }
+ }
+}
+
/* Callback for a readdir request sent to a single subvolume (readdir is
 * dispatched to one brick only). Adjusts the returned entries and either
 * retries on another subvolume or reports the result to the caller. */
int32_t ec_readdir_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                       int32_t op_ret, int32_t op_errno, gf_dirent_t * entries,
                       dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    if (op_ret > 0)
    {
        /* Fix offsets and file sizes before handing entries to the caller. */
        ec_adjust_readdir(fop->xl->private, idx, entries);
    }

    /* If the answer is final (no retry on another subvolume), deliver it. */
    if (!ec_dispatch_one_retry(fop, idx, op_ret, op_errno))
    {
        if (fop->cbks.readdir != NULL)
        {
            fop->cbks.readdir(fop->req_frame, fop, this, op_ret, op_errno,
                              entries, xdata);
        }
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
+void ec_wind_readdir(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+ ec_trace("WIND", fop, "idx=%d", idx);
+
+ STACK_WIND_COOKIE(fop->frame, ec_readdir_cbk, (void *)(uintptr_t)idx,
+ ec->xl_list[idx], ec->xl_list[idx]->fops->readdir,
+ fop->fd, fop->size, fop->offset, fop->xdata);
+}
+
/* State machine shared by readdir and readdirp. The request is sent to a
 * single subvolume; a non-zero offset encodes which subvolume produced it,
 * so the walk continues on the same brick. */
int32_t ec_manager_readdir(ec_fop_data_t * fop, int32_t state)
{
    switch (state)
    {
        case EC_STATE_INIT:
            /* Ask the bricks to return the exact file size of each entry
             * (EC_XATTR_SIZE) so ec_adjust_readdir() can rebuild ia_size.
             * NOTE(review): this is requested for plain readdir too —
             * presumably harmless, but confirm it is intentional. */
            if (fop->xdata == NULL)
            {
                fop->xdata = dict_new();
                if (fop->xdata == NULL)
                {
                    gf_log(fop->xl->name, GF_LOG_ERROR, "Unable to prepare "
                                                        "readdirp request");

                    fop->error = EIO;

                    return EC_STATE_REPORT;
                }
            }
            if (dict_set_uint64(fop->xdata, EC_XATTR_SIZE, 0) != 0)
            {
                gf_log(fop->xl->name, GF_LOG_ERROR, "Unable to prepare "
                                                    "readdirp request");

                fop->error = EIO;

                return EC_STATE_REPORT;
            }

            if (fop->offset != 0)
            {
                int32_t idx;

                /* Decode the subvolume embedded in the offset and restrict
                 * the dispatch mask to that one brick. */
                fop->offset = ec_deitransform(fop->xl->private, &idx,
                                              fop->offset);
                fop->mask &= 1ULL << idx;
            }

            /* fall through */

        case EC_STATE_DISPATCH:
            ec_dispatch_one(fop);

            return EC_STATE_REPORT;

        case -EC_STATE_REPORT:
            /* Error path: the success path reports from the callback. */
            if (fop->id == GF_FOP_READDIR)
            {
                if (fop->cbks.readdir != NULL)
                {
                    fop->cbks.readdir(fop->req_frame, fop, fop->xl, -1,
                                      fop->error, NULL, NULL);
                }
            }
            else
            {
                if (fop->cbks.readdirp != NULL)
                {
                    fop->cbks.readdirp(fop->req_frame, fop, fop->xl, -1,
                                       fop->error, NULL, NULL);
                }
            }

            /* fall through */

        case EC_STATE_REPORT:
            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
/* Entry point for the readdir fop. Builds the fop control structure with
 * references to all arguments and starts the shared readdir state machine.
 * On allocation failure the caller's callback is invoked with EIO. */
void ec_readdir(call_frame_t * frame, xlator_t * this, uintptr_t target,
                int32_t minimum, fop_readdir_cbk_t func, void * data,
                fd_t * fd, size_t size, off_t offset, dict_t * xdata)
{
    ec_cbk_t callback = { .readdir = func };
    ec_fop_data_t * fop = NULL;
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(READDIR) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_READDIR, 0, target, minimum,
                               ec_wind_readdir, ec_manager_readdir, callback,
                               data);
    if (fop == NULL)
    {
        goto out;
    }

    fop->size = size;
    fop->offset = offset;

    if (fd != NULL)
    {
        fop->fd = fd_ref(fd);
        if (fop->fd == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "file descriptor.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL, NULL);
    }
}
+
+/* FOP: readdirp */
+
/* Callback for a readdirp request sent to a single subvolume. Identical in
 * structure to ec_readdir_cbk(), but delivers through the readdirp callback. */
int32_t ec_readdirp_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                        int32_t op_ret, int32_t op_errno,
                        gf_dirent_t * entries, dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    if (op_ret > 0)
    {
        /* Fix offsets and file sizes before handing entries to the caller. */
        ec_adjust_readdir(fop->xl->private, idx, entries);
    }

    /* If the answer is final (no retry on another subvolume), deliver it. */
    if (!ec_dispatch_one_retry(fop, idx, op_ret, op_errno))
    {
        if (fop->cbks.readdirp != NULL)
        {
            fop->cbks.readdirp(fop->req_frame, fop, this, op_ret, op_errno,
                               entries, xdata);
        }
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
+void ec_wind_readdirp(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+ ec_trace("WIND", fop, "idx=%d", idx);
+
+ STACK_WIND_COOKIE(fop->frame, ec_readdirp_cbk, (void *)(uintptr_t)idx,
+ ec->xl_list[idx], ec->xl_list[idx]->fops->readdirp,
+ fop->fd, fop->size, fop->offset, fop->xdata);
+}
+
/* Entry point for the readdirp fop. Shares the state machine with readdir
 * (ec_manager_readdir distinguishes the two via fop->id). On allocation
 * failure the caller's callback is invoked with EIO. */
void ec_readdirp(call_frame_t * frame, xlator_t * this, uintptr_t target,
                 int32_t minimum, fop_readdirp_cbk_t func, void * data,
                 fd_t * fd, size_t size, off_t offset, dict_t * xdata)
{
    ec_cbk_t callback = { .readdirp = func };
    ec_fop_data_t * fop = NULL;
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(READDIRP) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_READDIRP, 0, target,
                               minimum, ec_wind_readdirp, ec_manager_readdir,
                               callback, data);
    if (fop == NULL)
    {
        goto out;
    }

    fop->size = size;
    fop->offset = offset;

    if (fd != NULL)
    {
        fop->fd = fd_ref(fd);
        if (fop->fd == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "file descriptor.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL, NULL);
    }
}
diff --git a/xlators/cluster/ec/src/ec-dir-write.c b/xlators/cluster/ec/src/ec-dir-write.c
new file mode 100644
index 00000000000..da89e34ba25
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-dir-write.c
@@ -0,0 +1,2102 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include "xlator.h"
+#include "defaults.h"
+
+#include "ec-helpers.h"
+#include "ec-common.h"
+#include "ec-combine.h"
+#include "ec-method.h"
+#include "ec-fops.h"
+
+/* FOP: create */
+
+int32_t ec_combine_create(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (dst->fd != src->fd)
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching fd in answers "
+ "of 'GF_FOP_CREATE': %p <-> %p",
+ dst->fd, src->fd);
+
+ return 0;
+ }
+
+ if (!ec_iatt_combine(dst->iatt, src->iatt, 3))
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching iatt in "
+ "answers of 'GF_FOP_CREATE'");
+
+ return 0;
+ }
+
+ return 1;
+}
+
/* Callback invoked when one subvolume answers a create request. The answer
 * (fd, inode and the three iatts) is wrapped into an ec_cbk_data_t and
 * combined with compatible answers from other subvolumes. */
int32_t ec_create_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                      int32_t op_ret, int32_t op_errno, fd_t * fd,
                      inode_t * inode, struct iatt * buf,
                      struct iatt * preparent, struct iatt * postparent,
                      dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_CREATE, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (op_ret >= 0)
        {
            /* Take references on the returned fd and inode, and copy the
             * iatts: [0]=entry, [1]=preparent, [2]=postparent. */
            if (fd != NULL)
            {
                cbk->fd = fd_ref(fd);
                if (cbk->fd == NULL)
                {
                    gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                     "file descriptor.");

                    goto out;
                }
            }
            if (inode != NULL)
            {
                cbk->inode = inode_ref(inode);
                if (cbk->inode == NULL)
                {
                    gf_log(this->name, GF_LOG_ERROR,
                           "Failed to reference an inode.");

                    goto out;
                }
            }
            if (buf != NULL)
            {
                cbk->iatt[0] = *buf;
            }
            if (preparent != NULL)
            {
                cbk->iatt[1] = *preparent;
            }
            if (postparent != NULL)
            {
                cbk->iatt[2] = *postparent;
            }
        }
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        /* Group this answer with matching ones from other subvolumes. */
        ec_combine(cbk, ec_combine_create);
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
+void ec_wind_create(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+ ec_trace("WIND", fop, "idx=%d", idx);
+
+ STACK_WIND_COOKIE(fop->frame, ec_create_cbk, (void *)(uintptr_t)idx,
+ ec->xl_list[idx], ec->xl_list[idx]->fops->create,
+ &fop->loc[0], fop->int32, fop->mode[0], fop->mode[1],
+ fop->fd, fop->xdata);
+}
+
/* State machine for the create fop. Stores the user's open flags in the fd
 * context, forces the internal open mode to O_RDWR, locks the parent entry,
 * dispatches to all subvolumes and updates size/version on success. */
int32_t ec_manager_create(ec_fop_data_t * fop, int32_t state)
{
    ec_cbk_data_t * cbk;
    ec_fd_t * ctx;

    switch (state)
    {
        case EC_STATE_INIT:
            LOCK(&fop->fd->lock);

            /* Bind the fd context to the location being created. */
            ctx = __ec_fd_get(fop->fd, fop->xl);
            if ((ctx == NULL) || !ec_loc_from_loc(fop->xl, &ctx->loc,
                                                  &fop->loc[0]))
            {
                UNLOCK(&fop->fd->lock);

                fop->error = EIO;

                return EC_STATE_REPORT;
            }

            /* Remember the flags requested by the user. */
            if (ctx->flags == 0)
            {
                ctx->flags = fop->int32;
            }

            UNLOCK(&fop->fd->lock);

            /* Open with O_RDWR internally regardless of the requested access
             * mode — presumably so fragments can later be both read and
             * rewritten; the user's flags are kept in ctx->flags. */
            fop->int32 &= ~O_ACCMODE;
            fop->int32 |= O_RDWR;

            /* fall through */

        case EC_STATE_LOCK:
            ec_lock_entry(fop, &fop->loc[0]);

            return EC_STATE_DISPATCH;

        case EC_STATE_DISPATCH:
            ec_dispatch_all(fop);

            return EC_STATE_PREPARE_ANSWER;

        case EC_STATE_PREPARE_ANSWER:
            cbk = fop->answer;
            if (cbk != NULL)
            {
                /* Merge xdata of the grouped answers; degrade to EIO on
                 * failure. */
                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
                {
                    if (cbk->op_ret >= 0)
                    {
                        cbk->op_ret = -1;
                        cbk->op_errno = EIO;
                    }
                }
                if (cbk->op_ret < 0)
                {
                    ec_fop_set_error(fop, cbk->op_errno);
                }
                else
                {
                    /* Rebuild combined iatts and record which subvolumes
                     * have the fd open. */
                    ec_iatt_rebuild(fop->xl->private, cbk->iatt, 3,
                                    cbk->count);

                    ec_loc_prepare(fop->xl, &fop->loc[0], cbk->inode,
                                   &cbk->iatt[0]);

                    LOCK(&fop->fd->lock);

                    ctx = __ec_fd_get(fop->fd, fop->xl);
                    if (ctx != NULL)
                    {
                        ctx->open |= cbk->mask;
                    }

                    UNLOCK(&fop->fd->lock);
                }
            }
            else
            {
                ec_fop_set_error(fop, EIO);
            }

            return EC_STATE_REPORT;

        case EC_STATE_REPORT:
            cbk = fop->answer;

            GF_ASSERT(cbk != NULL);

            if (fop->cbks.create != NULL)
            {
                fop->cbks.create(fop->req_frame, fop, fop->xl, cbk->op_ret,
                                 cbk->op_errno, cbk->fd, cbk->inode,
                                 &cbk->iatt[0], &cbk->iatt[1], &cbk->iatt[2],
                                 cbk->xdata);
            }

            /* Only bump size/version metadata when the create succeeded. */
            if (cbk->op_ret >= 0)
            {
                return EC_STATE_UPDATE_SIZE_AND_VERSION;
            }
            return EC_STATE_UNLOCK;

        case -EC_STATE_LOCK:
        case -EC_STATE_DISPATCH:
        case -EC_STATE_PREPARE_ANSWER:
        case -EC_STATE_REPORT:
            GF_ASSERT(fop->error != 0);

            if (fop->cbks.create != NULL)
            {
                fop->cbks.create(fop->req_frame, fop, fop->xl, -1, fop->error,
                                 NULL, NULL, NULL, NULL, NULL, NULL);
            }

            return EC_STATE_UNLOCK;

        case EC_STATE_UPDATE_SIZE_AND_VERSION:
            ec_update_size_version(fop);

            return EC_STATE_UNLOCK;

        case -EC_STATE_UPDATE_SIZE_AND_VERSION:
        case -EC_STATE_UNLOCK:
        case EC_STATE_UNLOCK:
            /* Always release the entry lock, on success and on error. */
            ec_unlock(fop);

            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
/* Entry point for the create fop. Builds the fop control structure with
 * copies / references of all arguments (flags, mode, umask included) and
 * starts its state machine. On allocation failure the caller's callback is
 * invoked with EIO. */
void ec_create(call_frame_t * frame, xlator_t * this, uintptr_t target,
               int32_t minimum, fop_create_cbk_t func, void * data,
               loc_t * loc, int32_t flags, mode_t mode, mode_t umask,
               fd_t * fd, dict_t * xdata)
{
    ec_cbk_t callback = { .create = func };
    ec_fop_data_t * fop = NULL;
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(CREATE) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_CREATE,
                               EC_FLAG_UPDATE_LOC_PARENT |
                               EC_FLAG_UPDATE_FD_INODE, target, minimum,
                               ec_wind_create, ec_manager_create, callback,
                               data);
    if (fop == NULL)
    {
        goto out;
    }

    fop->int32 = flags;
    fop->mode[0] = mode;
    fop->mode[1] = umask;

    if (loc != NULL)
    {
        if (loc_copy(&fop->loc[0], loc) != 0)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");

            goto out;
        }
    }
    if (fd != NULL)
    {
        fop->fd = fd_ref(fd);
        if (fop->fd == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "file descriptor.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL, NULL, NULL, NULL, NULL, NULL);
    }
}
+
+/* FOP: link */
+
+int32_t ec_combine_link(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (!ec_iatt_combine(dst->iatt, src->iatt, 3))
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching iatt in "
+ "answers of 'GF_FOP_LINK'");
+
+ return 0;
+ }
+
+ return 1;
+}
+
/* Callback invoked when one subvolume answers a link request. The answer
 * (inode and the three iatts) is wrapped into an ec_cbk_data_t and combined
 * with compatible answers from other subvolumes. */
int32_t ec_link_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                    int32_t op_ret, int32_t op_errno, inode_t * inode,
                    struct iatt * buf, struct iatt * preparent,
                    struct iatt * postparent, dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_LINK, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (op_ret >= 0)
        {
            /* [0]=entry, [1]=preparent, [2]=postparent. */
            if (inode != NULL)
            {
                cbk->inode = inode_ref(inode);
                if (cbk->inode == NULL)
                {
                    gf_log(this->name, GF_LOG_ERROR,
                           "Failed to reference an inode.");

                    goto out;
                }
            }
            if (buf != NULL)
            {
                cbk->iatt[0] = *buf;
            }
            if (preparent != NULL)
            {
                cbk->iatt[1] = *preparent;
            }
            if (postparent != NULL)
            {
                cbk->iatt[2] = *postparent;
            }
        }
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        /* Group this answer with matching ones from other subvolumes. */
        ec_combine(cbk, ec_combine_link);
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
+void ec_wind_link(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+ ec_trace("WIND", fop, "idx=%d", idx);
+
+ STACK_WIND_COOKIE(fop->frame, ec_link_cbk, (void *)(uintptr_t)idx,
+ ec->xl_list[idx], ec->xl_list[idx]->fops->link,
+ &fop->loc[0], &fop->loc[1], fop->xdata);
+}
+
/* State machine for the link fop. Locks the new entry's parent, fetches the
 * file's real size/version first (so the returned iatt can report the real
 * size), then dispatches to all subvolumes. */
int32_t ec_manager_link(ec_fop_data_t * fop, int32_t state)
{
    ec_cbk_data_t * cbk;

    switch (state)
    {
        case EC_STATE_INIT:
        case EC_STATE_LOCK:
            // Parent entry of fop->loc[0] should be locked, but I don't
            // receive enough information to do it (fop->loc[0].parent is
            // NULL).
            ec_lock_entry(fop, &fop->loc[1]);

            return EC_STATE_GET_SIZE_AND_VERSION;

        case EC_STATE_GET_SIZE_AND_VERSION:
            /* Retrieves the real file size into fop->pre_size. */
            ec_get_size_version(fop);

            return EC_STATE_DISPATCH;

        case EC_STATE_DISPATCH:
            ec_dispatch_all(fop);

            return EC_STATE_PREPARE_ANSWER;

        case EC_STATE_PREPARE_ANSWER:
            cbk = fop->answer;
            if (cbk != NULL)
            {
                /* Merge xdata of the grouped answers; degrade to EIO on
                 * failure. */
                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
                {
                    if (cbk->op_ret >= 0)
                    {
                        cbk->op_ret = -1;
                        cbk->op_errno = EIO;
                    }
                }
                if (cbk->op_ret < 0)
                {
                    ec_fop_set_error(fop, cbk->op_errno);
                }
                else
                {
                    ec_iatt_rebuild(fop->xl->private, cbk->iatt, 3,
                                    cbk->count);

                    ec_loc_prepare(fop->xl, &fop->loc[0], cbk->inode,
                                   &cbk->iatt[0]);

                    /* Report the real (pre-fetched) size, not the fragment
                     * size stored on the bricks. */
                    if (cbk->iatt[0].ia_type == IA_IFREG)
                    {
                        cbk->iatt[0].ia_size = fop->pre_size;
                    }
                }
            }
            else
            {
                ec_fop_set_error(fop, EIO);
            }

            return EC_STATE_REPORT;

        case EC_STATE_REPORT:
            cbk = fop->answer;

            GF_ASSERT(cbk != NULL);

            if (fop->cbks.link != NULL)
            {
                fop->cbks.link(fop->req_frame, fop, fop->xl, cbk->op_ret,
                               cbk->op_errno, cbk->inode, &cbk->iatt[0],
                               &cbk->iatt[1], &cbk->iatt[2], cbk->xdata);
            }

            return EC_STATE_UNLOCK;

        case -EC_STATE_LOCK:
        case -EC_STATE_GET_SIZE_AND_VERSION:
        case -EC_STATE_DISPATCH:
        case -EC_STATE_PREPARE_ANSWER:
        case -EC_STATE_REPORT:
            GF_ASSERT(fop->error != 0);

            if (fop->cbks.link != NULL)
            {
                fop->cbks.link(fop->req_frame, fop, fop->xl, -1, fop->error,
                               NULL, NULL, NULL, NULL, NULL);
            }

            return EC_STATE_UNLOCK;

        case -EC_STATE_UNLOCK:
        case EC_STATE_UNLOCK:
            /* Always release the entry lock, on success and on error. */
            ec_unlock(fop);

            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
/* Entry point for the link fop. Copies both locations (oldloc -> loc[0],
 * newloc -> loc[1]) into the fop control structure and starts its state
 * machine. On allocation failure the caller's callback is invoked with EIO. */
void ec_link(call_frame_t * frame, xlator_t * this, uintptr_t target,
             int32_t minimum, fop_link_cbk_t func, void * data, loc_t * oldloc,
             loc_t * newloc, dict_t * xdata)
{
    ec_cbk_t callback = { .link = func };
    ec_fop_data_t * fop = NULL;
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(LINK) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_LINK, 0, target, minimum,
                               ec_wind_link, ec_manager_link, callback, data);
    if (fop == NULL)
    {
        goto out;
    }

    if (oldloc != NULL)
    {
        if (loc_copy(&fop->loc[0], oldloc) != 0)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");

            goto out;
        }
    }
    if (newloc != NULL)
    {
        if (loc_copy(&fop->loc[1], newloc) != 0)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL, NULL, NULL, NULL, NULL);
    }
}
+
+/* FOP: mkdir */
+
+int32_t ec_combine_mkdir(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (!ec_iatt_combine(dst->iatt, src->iatt, 3))
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching iatt in "
+ "answers of 'GF_FOP_MKDIR'");
+
+ return 0;
+ }
+
+ return 1;
+}
+
/* Callback invoked when one subvolume answers a mkdir request. The answer
 * (inode and the three iatts) is wrapped into an ec_cbk_data_t and combined
 * with compatible answers from other subvolumes. */
int32_t ec_mkdir_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                     int32_t op_ret, int32_t op_errno, inode_t * inode,
                     struct iatt * buf, struct iatt * preparent,
                     struct iatt * postparent, dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_MKDIR, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (op_ret >= 0)
        {
            /* [0]=entry, [1]=preparent, [2]=postparent. */
            if (inode != NULL)
            {
                cbk->inode = inode_ref(inode);
                if (cbk->inode == NULL)
                {
                    gf_log(this->name, GF_LOG_ERROR,
                           "Failed to reference an inode.");

                    goto out;
                }
            }
            if (buf != NULL)
            {
                cbk->iatt[0] = *buf;
            }
            if (preparent != NULL)
            {
                cbk->iatt[1] = *preparent;
            }
            if (postparent != NULL)
            {
                cbk->iatt[2] = *postparent;
            }
        }
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        /* Group this answer with matching ones from other subvolumes. */
        ec_combine(cbk, ec_combine_mkdir);
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
+void ec_wind_mkdir(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+ ec_trace("WIND", fop, "idx=%d", idx);
+
+ STACK_WIND_COOKIE(fop->frame, ec_mkdir_cbk, (void *)(uintptr_t)idx,
+ ec->xl_list[idx], ec->xl_list[idx]->fops->mkdir,
+ &fop->loc[0], fop->mode[0], fop->mode[1], fop->xdata);
+}
+
/* State machine for the mkdir fop. Locks the parent entry, dispatches to all
 * subvolumes, combines the answers and reports the result. */
int32_t ec_manager_mkdir(ec_fop_data_t * fop, int32_t state)
{
    ec_cbk_data_t * cbk;

    switch (state)
    {
        case EC_STATE_INIT:
        case EC_STATE_LOCK:
            ec_lock_entry(fop, &fop->loc[0]);

            return EC_STATE_DISPATCH;

        case EC_STATE_DISPATCH:
            ec_dispatch_all(fop);

            return EC_STATE_PREPARE_ANSWER;

        case EC_STATE_PREPARE_ANSWER:
            cbk = fop->answer;
            if (cbk != NULL)
            {
                /* Merge xdata of the grouped answers; degrade to EIO on
                 * failure. */
                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
                {
                    if (cbk->op_ret >= 0)
                    {
                        cbk->op_ret = -1;
                        cbk->op_errno = EIO;
                    }
                }
                if (cbk->op_ret < 0)
                {
                    ec_fop_set_error(fop, cbk->op_errno);
                }
                else
                {
                    ec_iatt_rebuild(fop->xl->private, cbk->iatt, 3,
                                    cbk->count);

                    ec_loc_prepare(fop->xl, &fop->loc[0], cbk->inode,
                                   &cbk->iatt[0]);
                }
            }
            else
            {
                ec_fop_set_error(fop, EIO);
            }

            return EC_STATE_REPORT;

        case EC_STATE_REPORT:
            cbk = fop->answer;

            GF_ASSERT(cbk != NULL);

            if (fop->cbks.mkdir != NULL)
            {
                fop->cbks.mkdir(fop->req_frame, fop, fop->xl, cbk->op_ret,
                                cbk->op_errno, cbk->inode, &cbk->iatt[0],
                                &cbk->iatt[1], &cbk->iatt[2], cbk->xdata);
            }

            return EC_STATE_UNLOCK;

        case -EC_STATE_LOCK:
        case -EC_STATE_DISPATCH:
        case -EC_STATE_PREPARE_ANSWER:
        case -EC_STATE_REPORT:
            GF_ASSERT(fop->error != 0);

            if (fop->cbks.mkdir != NULL)
            {
                fop->cbks.mkdir(fop->req_frame, fop, fop->xl, -1, fop->error,
                                NULL, NULL, NULL, NULL, NULL);
            }

            return EC_STATE_UNLOCK;

        case -EC_STATE_UNLOCK:
        case EC_STATE_UNLOCK:
            /* Always release the entry lock, on success and on error. */
            ec_unlock(fop);

            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
/* Entry point for the mkdir fop. Copies the location, mode and umask into
 * the fop control structure and starts its state machine. On allocation
 * failure the caller's callback is invoked with EIO. */
void ec_mkdir(call_frame_t * frame, xlator_t * this, uintptr_t target,
              int32_t minimum, fop_mkdir_cbk_t func, void * data, loc_t * loc,
              mode_t mode, mode_t umask, dict_t * xdata)
{
    ec_cbk_t callback = { .mkdir = func };
    ec_fop_data_t * fop = NULL;
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(MKDIR) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_MKDIR,
                               EC_FLAG_UPDATE_LOC_PARENT, target, minimum,
                               ec_wind_mkdir, ec_manager_mkdir, callback,
                               data);
    if (fop == NULL)
    {
        goto out;
    }

    fop->mode[0] = mode;
    fop->mode[1] = umask;

    if (loc != NULL)
    {
        if (loc_copy(&fop->loc[0], loc) != 0)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL, NULL, NULL, NULL, NULL);
    }
}
+
+/* FOP: mknod */
+
+int32_t ec_combine_mknod(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (!ec_iatt_combine(dst->iatt, src->iatt, 3))
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching iatt in "
+ "answers of 'GF_FOP_MKNOD'");
+
+ return 0;
+ }
+
+ return 1;
+}
+
/* Callback invoked when one subvolume answers a mknod request. The answer
 * (inode and the three iatts) is wrapped into an ec_cbk_data_t and combined
 * with compatible answers from other subvolumes. */
int32_t ec_mknod_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                     int32_t op_ret, int32_t op_errno, inode_t * inode,
                     struct iatt * buf, struct iatt * preparent,
                     struct iatt * postparent, dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_MKNOD, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (op_ret >= 0)
        {
            /* [0]=entry, [1]=preparent, [2]=postparent. */
            if (inode != NULL)
            {
                cbk->inode = inode_ref(inode);
                if (cbk->inode == NULL)
                {
                    gf_log(this->name, GF_LOG_ERROR,
                           "Failed to reference an inode.");

                    goto out;
                }
            }
            if (buf != NULL)
            {
                cbk->iatt[0] = *buf;
            }
            if (preparent != NULL)
            {
                cbk->iatt[1] = *preparent;
            }
            if (postparent != NULL)
            {
                cbk->iatt[2] = *postparent;
            }
        }
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        /* Group this answer with matching ones from other subvolumes. */
        ec_combine(cbk, ec_combine_mknod);
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
+void ec_wind_mknod(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+ ec_trace("WIND", fop, "idx=%d", idx);
+
+ STACK_WIND_COOKIE(fop->frame, ec_mknod_cbk, (void *)(uintptr_t)idx,
+ ec->xl_list[idx], ec->xl_list[idx]->fops->mknod,
+ &fop->loc[0], fop->mode[0], fop->dev, fop->mode[1],
+ fop->xdata);
+}
+
/* State machine for the mknod fop. Locks the parent entry, dispatches to all
 * subvolumes, combines the answers and reports the result. */
int32_t ec_manager_mknod(ec_fop_data_t * fop, int32_t state)
{
    ec_cbk_data_t * cbk;

    switch (state)
    {
        case EC_STATE_INIT:
        case EC_STATE_LOCK:
            ec_lock_entry(fop, &fop->loc[0]);

            return EC_STATE_DISPATCH;

        case EC_STATE_DISPATCH:
            ec_dispatch_all(fop);

            return EC_STATE_PREPARE_ANSWER;

        case EC_STATE_PREPARE_ANSWER:
            cbk = fop->answer;
            if (cbk != NULL)
            {
                /* Merge xdata of the grouped answers; degrade to EIO on
                 * failure. */
                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
                {
                    if (cbk->op_ret >= 0)
                    {
                        cbk->op_ret = -1;
                        cbk->op_errno = EIO;
                    }
                }
                if (cbk->op_ret < 0)
                {
                    ec_fop_set_error(fop, cbk->op_errno);
                }
                else
                {
                    ec_iatt_rebuild(fop->xl->private, cbk->iatt, 3,
                                    cbk->count);

                    ec_loc_prepare(fop->xl, &fop->loc[0], cbk->inode,
                                   &cbk->iatt[0]);
                }
            }
            else
            {
                ec_fop_set_error(fop, EIO);
            }

            return EC_STATE_REPORT;

        case EC_STATE_REPORT:
            cbk = fop->answer;

            GF_ASSERT(cbk != NULL);

            if (fop->cbks.mknod != NULL)
            {
                fop->cbks.mknod(fop->req_frame, fop, fop->xl, cbk->op_ret,
                                cbk->op_errno, cbk->inode, &cbk->iatt[0],
                                &cbk->iatt[1], &cbk->iatt[2], cbk->xdata);
            }

            return EC_STATE_UNLOCK;

        case -EC_STATE_LOCK:
        case -EC_STATE_DISPATCH:
        case -EC_STATE_PREPARE_ANSWER:
        case -EC_STATE_REPORT:
            GF_ASSERT(fop->error != 0);

            if (fop->cbks.mknod != NULL)
            {
                fop->cbks.mknod(fop->req_frame, fop, fop->xl, -1, fop->error,
                                NULL, NULL, NULL, NULL, NULL);
            }

            return EC_STATE_UNLOCK;

        case -EC_STATE_UNLOCK:
        case EC_STATE_UNLOCK:
            /* Always release the entry lock, on success and on error. */
            ec_unlock(fop);

            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
+void ec_mknod(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_mknod_cbk_t func, void * data, loc_t * loc,
+ mode_t mode, dev_t rdev, mode_t umask, dict_t * xdata)
+{
+ ec_cbk_t callback = { .mknod = func };
+ ec_fop_data_t * fop = NULL;
+ int32_t error = EIO;
+
+ gf_log("ec", GF_LOG_TRACE, "EC(MKNOD) %p", frame);
+
+ VALIDATE_OR_GOTO(this, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ fop = ec_fop_data_allocate(frame, this, GF_FOP_MKNOD,
+ EC_FLAG_UPDATE_LOC_PARENT, target, minimum,
+ ec_wind_mknod, ec_manager_mknod, callback,
+ data);
+ if (fop == NULL)
+ {
+ goto out;
+ }
+
+ fop->mode[0] = mode;
+ fop->dev = rdev;
+ fop->mode[1] = umask;
+
+ if (loc != NULL)
+ {
+ if (loc_copy(&fop->loc[0], loc) != 0)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");
+
+ goto out;
+ }
+ }
+ if (xdata != NULL)
+ {
+ fop->xdata = dict_ref(xdata);
+ if (fop->xdata == NULL)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
+ "dictionary.");
+
+ goto out;
+ }
+ }
+
+ error = 0;
+
+out:
+ if (fop != NULL)
+ {
+ ec_manager(fop, error);
+ }
+ else
+ {
+ func(frame, NULL, this, -1, EIO, NULL, NULL, NULL, NULL, NULL);
+ }
+}
+
+/* FOP: rename */
+
+int32_t ec_combine_rename(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (!ec_iatt_combine(dst->iatt, src->iatt, 5))
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching iatt in "
+ "answers of 'GF_FOP_RENAME'");
+
+ return 0;
+ }
+
+ return 1;
+}
+
+int32_t ec_rename_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
+ int32_t op_ret, int32_t op_errno, struct iatt * buf,
+ struct iatt * preoldparent, struct iatt * postoldparent,
+ struct iatt * prenewparent, struct iatt * postnewparent,
+ dict_t * xdata)
+{
+ ec_fop_data_t * fop = NULL;
+ ec_cbk_data_t * cbk = NULL;
+ int32_t idx = (int32_t)(uintptr_t)cookie;
+
+ VALIDATE_OR_GOTO(this, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ fop = frame->local;
+
+ ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
+ frame, op_ret, op_errno);
+
+ cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_RENAME, idx, op_ret,
+ op_errno);
+ if (cbk != NULL)
+ {
+ if (op_ret >= 0)
+ {
+ if (buf != NULL)
+ {
+ cbk->iatt[0] = *buf;
+ }
+ if (preoldparent != NULL)
+ {
+ cbk->iatt[1] = *preoldparent;
+ }
+ if (postoldparent != NULL)
+ {
+ cbk->iatt[2] = *postoldparent;
+ }
+ if (prenewparent != NULL)
+ {
+ cbk->iatt[3] = *prenewparent;
+ }
+ if (postnewparent != NULL)
+ {
+ cbk->iatt[4] = *postnewparent;
+ }
+ }
+ if (xdata != NULL)
+ {
+ cbk->xdata = dict_ref(xdata);
+ if (cbk->xdata == NULL)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
+ "dictionary.");
+
+ goto out;
+ }
+ }
+
+ ec_combine(cbk, ec_combine_rename);
+ }
+
+out:
+ if (fop != NULL)
+ {
+ ec_complete(fop);
+ }
+
+ return 0;
+}
+
+void ec_wind_rename(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+ ec_trace("WIND", fop, "idx=%d", idx);
+
+ STACK_WIND_COOKIE(fop->frame, ec_rename_cbk, (void *)(uintptr_t)idx,
+ ec->xl_list[idx], ec->xl_list[idx]->fops->rename,
+ &fop->loc[0], &fop->loc[1], fop->xdata);
+}
+
+int32_t ec_manager_rename(ec_fop_data_t * fop, int32_t state)
+{
+ ec_cbk_data_t * cbk;
+
+ switch (state)
+ {
+ case EC_STATE_INIT:
+ case EC_STATE_LOCK:
+ ec_lock_entry(fop, &fop->loc[0]);
+ ec_lock_entry(fop, &fop->loc[1]);
+
+ return EC_STATE_GET_SIZE_AND_VERSION;
+
+ case EC_STATE_GET_SIZE_AND_VERSION:
+ ec_get_size_version(fop);
+
+ return EC_STATE_DISPATCH;
+
+ case EC_STATE_DISPATCH:
+ ec_dispatch_all(fop);
+
+ return EC_STATE_PREPARE_ANSWER;
+
+ case EC_STATE_PREPARE_ANSWER:
+ cbk = fop->answer;
+ if (cbk != NULL)
+ {
+ if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
+ {
+ if (cbk->op_ret >= 0)
+ {
+ cbk->op_ret = -1;
+ cbk->op_errno = EIO;
+ }
+ }
+ if (cbk->op_ret < 0)
+ {
+ ec_fop_set_error(fop, cbk->op_errno);
+ }
+ else
+ {
+ ec_iatt_rebuild(fop->xl->private, cbk->iatt, 5,
+ cbk->count);
+
+ if (cbk->iatt[0].ia_type == IA_IFREG)
+ {
+ cbk->iatt[0].ia_size = fop->pre_size;
+ }
+ }
+ }
+ else
+ {
+ ec_fop_set_error(fop, EIO);
+ }
+
+ return EC_STATE_REPORT;
+
+ case EC_STATE_REPORT:
+ cbk = fop->answer;
+
+ GF_ASSERT(cbk != NULL);
+
+ if (fop->cbks.rename != NULL)
+ {
+ fop->cbks.rename(fop->req_frame, fop, fop->xl, cbk->op_ret,
+ cbk->op_errno, &cbk->iatt[0], &cbk->iatt[1],
+ &cbk->iatt[2], &cbk->iatt[3], &cbk->iatt[4],
+ cbk->xdata);
+ }
+
+ return EC_STATE_UNLOCK;
+
+ case -EC_STATE_LOCK:
+ case -EC_STATE_GET_SIZE_AND_VERSION:
+ case -EC_STATE_DISPATCH:
+ case -EC_STATE_PREPARE_ANSWER:
+ case -EC_STATE_REPORT:
+ GF_ASSERT(fop->error != 0);
+
+ if (fop->cbks.rename != NULL)
+ {
+ fop->cbks.rename(fop->req_frame, fop, fop->xl, -1, fop->error,
+ NULL, NULL, NULL, NULL, NULL, NULL);
+ }
+
+ return EC_STATE_UNLOCK;
+
+ case -EC_STATE_UNLOCK:
+ case EC_STATE_UNLOCK:
+ ec_unlock(fop);
+
+ return EC_STATE_END;
+
+ default:
+ gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
+ state, ec_fop_name(fop->id));
+
+ return EC_STATE_END;
+ }
+}
+
+void ec_rename(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_rename_cbk_t func, void * data,
+ loc_t * oldloc, loc_t * newloc, dict_t * xdata)
+{
+ ec_cbk_t callback = { .rename = func };
+ ec_fop_data_t * fop = NULL;
+ int32_t error = EIO;
+
+ gf_log("ec", GF_LOG_TRACE, "EC(RENAME) %p", frame);
+
+ VALIDATE_OR_GOTO(this, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ fop = ec_fop_data_allocate(frame, this, GF_FOP_RENAME,
+ EC_FLAG_UPDATE_LOC_PARENT, target, minimum,
+ ec_wind_rename, ec_manager_rename, callback,
+ data);
+ if (fop == NULL)
+ {
+ goto out;
+ }
+
+ if (oldloc != NULL)
+ {
+ if (loc_copy(&fop->loc[0], oldloc) != 0)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");
+
+ goto out;
+ }
+ }
+ if (newloc != NULL)
+ {
+ if (loc_copy(&fop->loc[1], newloc) != 0)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");
+
+ goto out;
+ }
+ }
+ if (xdata != NULL)
+ {
+ fop->xdata = dict_ref(xdata);
+ if (fop->xdata == NULL)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
+ "dictionary.");
+
+ goto out;
+ }
+ }
+
+ error = 0;
+
+out:
+ if (fop != NULL)
+ {
+ ec_manager(fop, error);
+ }
+ else
+ {
+ func(frame, NULL, this, -1, EIO, NULL, NULL, NULL, NULL, NULL, NULL);
+ }
+}
+
+/* FOP: rmdir */
+
+int32_t ec_combine_rmdir(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (!ec_iatt_combine(dst->iatt, src->iatt, 2))
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching iatt in "
+ "answers of 'GF_FOP_RMDIR'");
+
+ return 0;
+ }
+
+ return 1;
+}
+
+int32_t ec_rmdir_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
+ int32_t op_ret, int32_t op_errno, struct iatt * preparent,
+ struct iatt * postparent, dict_t * xdata)
+{
+ ec_fop_data_t * fop = NULL;
+ ec_cbk_data_t * cbk = NULL;
+ int32_t idx = (int32_t)(uintptr_t)cookie;
+
+ VALIDATE_OR_GOTO(this, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ fop = frame->local;
+
+ ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
+ frame, op_ret, op_errno);
+
+ cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_RMDIR, idx, op_ret,
+ op_errno);
+ if (cbk != NULL)
+ {
+ if (op_ret >= 0)
+ {
+ if (preparent != NULL)
+ {
+ cbk->iatt[0] = *preparent;
+ }
+ if (postparent != NULL)
+ {
+ cbk->iatt[1] = *postparent;
+ }
+ }
+ if (xdata != NULL)
+ {
+ cbk->xdata = dict_ref(xdata);
+ if (cbk->xdata == NULL)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
+ "dictionary.");
+
+ goto out;
+ }
+ }
+
+ ec_combine(cbk, ec_combine_rmdir);
+ }
+
+out:
+ if (fop != NULL)
+ {
+ ec_complete(fop);
+ }
+
+ return 0;
+}
+
+void ec_wind_rmdir(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+ ec_trace("WIND", fop, "idx=%d", idx);
+
+ STACK_WIND_COOKIE(fop->frame, ec_rmdir_cbk, (void *)(uintptr_t)idx,
+ ec->xl_list[idx], ec->xl_list[idx]->fops->rmdir,
+ &fop->loc[0], fop->int32, fop->xdata);
+}
+
+int32_t ec_manager_rmdir(ec_fop_data_t * fop, int32_t state)
+{
+ ec_cbk_data_t * cbk;
+
+ switch (state)
+ {
+ case EC_STATE_INIT:
+ case EC_STATE_LOCK:
+ ec_lock_entry(fop, &fop->loc[0]);
+
+ return EC_STATE_DISPATCH;
+
+ case EC_STATE_DISPATCH:
+ ec_dispatch_all(fop);
+
+ return EC_STATE_PREPARE_ANSWER;
+
+ case EC_STATE_PREPARE_ANSWER:
+ cbk = fop->answer;
+ if (cbk != NULL)
+ {
+ if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
+ {
+ if (cbk->op_ret >= 0)
+ {
+ cbk->op_ret = -1;
+ cbk->op_errno = EIO;
+ }
+ }
+ if (cbk->op_ret < 0)
+ {
+ ec_fop_set_error(fop, cbk->op_errno);
+ }
+ }
+ else
+ {
+ ec_fop_set_error(fop, EIO);
+ }
+
+ return EC_STATE_REPORT;
+
+ case EC_STATE_REPORT:
+ cbk = fop->answer;
+
+ GF_ASSERT(cbk != NULL);
+
+ if (fop->cbks.rmdir != NULL)
+ {
+ fop->cbks.rmdir(fop->req_frame, fop, fop->xl, cbk->op_ret,
+ cbk->op_errno, &cbk->iatt[0], &cbk->iatt[1],
+ cbk->xdata);
+ }
+
+ return EC_STATE_UNLOCK;
+
+ case -EC_STATE_LOCK:
+ case -EC_STATE_DISPATCH:
+ case -EC_STATE_PREPARE_ANSWER:
+ case -EC_STATE_REPORT:
+ GF_ASSERT(fop->error != 0);
+
+ if (fop->cbks.rmdir != NULL)
+ {
+ fop->cbks.rmdir(fop->req_frame, fop, fop->xl, -1, fop->error,
+ NULL, NULL, NULL);
+ }
+
+ return EC_STATE_UNLOCK;
+
+ case -EC_STATE_UNLOCK:
+ case EC_STATE_UNLOCK:
+ ec_unlock(fop);
+
+ return EC_STATE_END;
+
+ default:
+ gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
+ state, ec_fop_name(fop->id));
+
+ return EC_STATE_END;
+ }
+}
+
+void ec_rmdir(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_rmdir_cbk_t func, void * data, loc_t * loc,
+ int xflags, dict_t * xdata)
+{
+ ec_cbk_t callback = { .rmdir = func };
+ ec_fop_data_t * fop = NULL;
+ int32_t error = EIO;
+
+ gf_log("ec", GF_LOG_TRACE, "EC(RMDIR) %p", frame);
+
+ VALIDATE_OR_GOTO(this, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ fop = ec_fop_data_allocate(frame, this, GF_FOP_RMDIR,
+ EC_FLAG_UPDATE_LOC_PARENT, target, minimum,
+ ec_wind_rmdir, ec_manager_rmdir, callback,
+ data);
+ if (fop == NULL)
+ {
+ goto out;
+ }
+
+ fop->int32 = xflags;
+
+ if (loc != NULL)
+ {
+ if (loc_copy(&fop->loc[0], loc) != 0)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");
+
+ goto out;
+ }
+ }
+ if (xdata != NULL)
+ {
+ fop->xdata = dict_ref(xdata);
+ if (fop->xdata == NULL)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
+ "dictionary.");
+
+ goto out;
+ }
+ }
+
+ error = 0;
+
+out:
+ if (fop != NULL)
+ {
+ ec_manager(fop, error);
+ }
+ else
+ {
+ func(frame, NULL, this, -1, EIO, NULL, NULL, NULL);
+ }
+}
+
+/* FOP: symlink */
+
+int32_t ec_combine_symlink(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (!ec_iatt_combine(dst->iatt, src->iatt, 3))
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching iatt in "
+ "answers of 'GF_FOP_SYMLINK'");
+
+ return 0;
+ }
+
+ return 1;
+}
+
+int32_t ec_symlink_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
+ int32_t op_ret, int32_t op_errno, inode_t * inode,
+ struct iatt * buf, struct iatt * preparent,
+ struct iatt * postparent, dict_t * xdata)
+{
+ ec_fop_data_t * fop = NULL;
+ ec_cbk_data_t * cbk = NULL;
+ int32_t idx = (int32_t)(uintptr_t)cookie;
+
+ VALIDATE_OR_GOTO(this, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ fop = frame->local;
+
+ ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
+ frame, op_ret, op_errno);
+
+ cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_SYMLINK, idx, op_ret,
+ op_errno);
+ if (cbk != NULL)
+ {
+ if (op_ret >= 0)
+ {
+ if (inode != NULL)
+ {
+ cbk->inode = inode_ref(inode);
+ if (cbk->inode == NULL)
+ {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Failed to reference an inode.");
+
+ goto out;
+ }
+ }
+ if (buf != NULL)
+ {
+ cbk->iatt[0] = *buf;
+ }
+ if (preparent != NULL)
+ {
+ cbk->iatt[1] = *preparent;
+ }
+ if (postparent != NULL)
+ {
+ cbk->iatt[2] = *postparent;
+ }
+ }
+ if (xdata != NULL)
+ {
+ cbk->xdata = dict_ref(xdata);
+ if (cbk->xdata == NULL)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
+ "dictionary.");
+
+ goto out;
+ }
+ }
+
+ ec_combine(cbk, ec_combine_symlink);
+ }
+
+out:
+ if (fop != NULL)
+ {
+ ec_complete(fop);
+ }
+
+ return 0;
+}
+
+void ec_wind_symlink(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+ ec_trace("WIND", fop, "idx=%d", idx);
+
+ STACK_WIND_COOKIE(fop->frame, ec_symlink_cbk, (void *)(uintptr_t)idx,
+ ec->xl_list[idx], ec->xl_list[idx]->fops->symlink,
+ fop->str[0], &fop->loc[0], fop->mode[0], fop->xdata);
+}
+
+int32_t ec_manager_symlink(ec_fop_data_t * fop, int32_t state)
+{
+ ec_cbk_data_t * cbk;
+
+ switch (state)
+ {
+ case EC_STATE_INIT:
+ case EC_STATE_LOCK:
+ ec_lock_entry(fop, &fop->loc[0]);
+
+ return EC_STATE_DISPATCH;
+
+ case EC_STATE_DISPATCH:
+ ec_dispatch_all(fop);
+
+ return EC_STATE_PREPARE_ANSWER;
+
+ case EC_STATE_PREPARE_ANSWER:
+ cbk = fop->answer;
+ if (cbk != NULL)
+ {
+ if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
+ {
+ if (cbk->op_ret >= 0)
+ {
+ cbk->op_ret = -1;
+ cbk->op_errno = EIO;
+ }
+ }
+ if (cbk->op_ret < 0)
+ {
+ ec_fop_set_error(fop, cbk->op_errno);
+ }
+ else
+ {
+ ec_iatt_rebuild(fop->xl->private, cbk->iatt, 3,
+ cbk->count);
+
+ ec_loc_prepare(fop->xl, &fop->loc[0], cbk->inode,
+ &cbk->iatt[0]);
+ }
+ }
+ else
+ {
+ ec_fop_set_error(fop, EIO);
+ }
+
+ return EC_STATE_REPORT;
+
+ case EC_STATE_REPORT:
+ cbk = fop->answer;
+
+ GF_ASSERT(cbk != NULL);
+
+ if (fop->cbks.symlink != NULL)
+ {
+ fop->cbks.symlink(fop->req_frame, fop, fop->xl, cbk->op_ret,
+ cbk->op_errno, cbk->inode, &cbk->iatt[0],
+ &cbk->iatt[1], &cbk->iatt[2], cbk->xdata);
+ }
+
+ return EC_STATE_UNLOCK;
+
+ case -EC_STATE_LOCK:
+ case -EC_STATE_DISPATCH:
+ case -EC_STATE_PREPARE_ANSWER:
+ case -EC_STATE_REPORT:
+ GF_ASSERT(fop->error != 0);
+
+ if (fop->cbks.symlink != NULL)
+ {
+ fop->cbks.symlink(fop->req_frame, fop, fop->xl, -1, fop->error,
+ NULL, NULL, NULL, NULL, NULL);
+ }
+
+ return EC_STATE_UNLOCK;
+
+ case -EC_STATE_UNLOCK:
+ case EC_STATE_UNLOCK:
+ ec_unlock(fop);
+
+ return EC_STATE_END;
+
+ default:
+ gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
+ state, ec_fop_name(fop->id));
+
+ return EC_STATE_END;
+ }
+}
+
+void ec_symlink(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_symlink_cbk_t func, void * data,
+ const char * linkname, loc_t * loc, mode_t umask,
+ dict_t * xdata)
+{
+ ec_cbk_t callback = { .symlink = func };
+ ec_fop_data_t * fop = NULL;
+ int32_t error = EIO;
+
+ gf_log("ec", GF_LOG_TRACE, "EC(SYMLINK) %p", frame);
+
+ VALIDATE_OR_GOTO(this, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ fop = ec_fop_data_allocate(frame, this, GF_FOP_SYMLINK,
+ EC_FLAG_UPDATE_LOC_PARENT, target, minimum,
+ ec_wind_symlink, ec_manager_symlink, callback,
+ data);
+ if (fop == NULL)
+ {
+ goto out;
+ }
+
+ fop->mode[0] = umask;
+
+ if (linkname != NULL)
+ {
+ fop->str[0] = gf_strdup(linkname);
+ if (fop->str[0] == NULL)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to duplicate a string.");
+
+ goto out;
+ }
+ }
+ if (loc != NULL)
+ {
+ if (loc_copy(&fop->loc[0], loc) != 0)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");
+
+ goto out;
+ }
+ }
+ if (xdata != NULL)
+ {
+ fop->xdata = dict_ref(xdata);
+ if (fop->xdata == NULL)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
+ "dictionary.");
+
+ goto out;
+ }
+ }
+
+ error = 0;
+
+out:
+ if (fop != NULL)
+ {
+ ec_manager(fop, error);
+ }
+ else
+ {
+ func(frame, NULL, this, -1, EIO, NULL, NULL, NULL, NULL, NULL);
+ }
+}
+
+/* FOP: unlink */
+
+int32_t ec_combine_unlink(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (!ec_iatt_combine(dst->iatt, src->iatt, 2))
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching iatt in "
+ "answers of 'GF_FOP_UNLINK'");
+
+ return 0;
+ }
+
+ return 1;
+}
+
+int32_t ec_unlink_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt * preparent, struct iatt * postparent,
+ dict_t * xdata)
+{
+ ec_fop_data_t * fop = NULL;
+ ec_cbk_data_t * cbk = NULL;
+ int32_t idx = (int32_t)(uintptr_t)cookie;
+
+ VALIDATE_OR_GOTO(this, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ fop = frame->local;
+
+ ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
+ frame, op_ret, op_errno);
+
+ cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_UNLINK, idx, op_ret,
+ op_errno);
+ if (cbk != NULL)
+ {
+ if (op_ret >= 0)
+ {
+ if (preparent != NULL)
+ {
+ cbk->iatt[0] = *preparent;
+ }
+ if (postparent != NULL)
+ {
+ cbk->iatt[1] = *postparent;
+ }
+ }
+ if (xdata != NULL)
+ {
+ cbk->xdata = dict_ref(xdata);
+ if (cbk->xdata == NULL)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
+ "dictionary.");
+
+ goto out;
+ }
+ }
+
+ ec_combine(cbk, ec_combine_unlink);
+ }
+
+out:
+ if (fop != NULL)
+ {
+ ec_complete(fop);
+ }
+
+ return 0;
+}
+
+void ec_wind_unlink(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+ ec_trace("WIND", fop, "idx=%d", idx);
+
+ STACK_WIND_COOKIE(fop->frame, ec_unlink_cbk, (void *)(uintptr_t)idx,
+ ec->xl_list[idx], ec->xl_list[idx]->fops->unlink,
+ &fop->loc[0], fop->int32, fop->xdata);
+}
+
+int32_t ec_manager_unlink(ec_fop_data_t * fop, int32_t state)
+{
+ ec_cbk_data_t * cbk;
+
+ switch (state)
+ {
+ case EC_STATE_INIT:
+ case EC_STATE_LOCK:
+ ec_lock_entry(fop, &fop->loc[0]);
+
+ return EC_STATE_GET_SIZE_AND_VERSION;
+
+ case EC_STATE_GET_SIZE_AND_VERSION:
+ ec_get_size_version(fop);
+
+ return EC_STATE_DISPATCH;
+
+ case EC_STATE_DISPATCH:
+ ec_dispatch_all(fop);
+
+ return EC_STATE_PREPARE_ANSWER;
+
+ case EC_STATE_PREPARE_ANSWER:
+ cbk = fop->answer;
+ if (cbk != NULL)
+ {
+ if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
+ {
+ if (cbk->op_ret >= 0)
+ {
+ cbk->op_ret = -1;
+ cbk->op_errno = EIO;
+ }
+ }
+ if (cbk->op_ret < 0)
+ {
+ ec_fop_set_error(fop, cbk->op_errno);
+ }
+ }
+ else
+ {
+ ec_fop_set_error(fop, EIO);
+ }
+
+ return EC_STATE_REPORT;
+
+ case EC_STATE_REPORT:
+ cbk = fop->answer;
+
+ GF_ASSERT(cbk != NULL);
+
+ if (fop->cbks.unlink != NULL)
+ {
+ fop->cbks.unlink(fop->req_frame, fop, fop->xl, cbk->op_ret,
+ cbk->op_errno, &cbk->iatt[0], &cbk->iatt[1],
+ cbk->xdata);
+ }
+
+ return EC_STATE_UNLOCK;
+
+ case -EC_STATE_LOCK:
+ case -EC_STATE_GET_SIZE_AND_VERSION:
+ case -EC_STATE_DISPATCH:
+ case -EC_STATE_PREPARE_ANSWER:
+ case -EC_STATE_REPORT:
+ GF_ASSERT(fop->error != 0);
+
+ if (fop->cbks.unlink != NULL)
+ {
+ fop->cbks.unlink(fop->req_frame, fop, fop->xl, -1, fop->error,
+ NULL, NULL, NULL);
+ }
+
+ return EC_STATE_UNLOCK;
+
+ case -EC_STATE_UNLOCK:
+ case EC_STATE_UNLOCK:
+ ec_unlock(fop);
+
+ return EC_STATE_END;
+
+ default:
+ gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
+ state, ec_fop_name(fop->id));
+
+ return EC_STATE_END;
+ }
+}
+
+void ec_unlink(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_unlink_cbk_t func, void * data,
+ loc_t * loc, int xflags, dict_t * xdata)
+{
+ ec_cbk_t callback = { .unlink = func };
+ ec_fop_data_t * fop = NULL;
+ int32_t error = EIO;
+
+ gf_log("ec", GF_LOG_TRACE, "EC(UNLINK) %p", frame);
+
+ VALIDATE_OR_GOTO(this, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ fop = ec_fop_data_allocate(frame, this, GF_FOP_UNLINK,
+ EC_FLAG_UPDATE_LOC_PARENT, target, minimum,
+ ec_wind_unlink, ec_manager_unlink, callback,
+ data);
+ if (fop == NULL)
+ {
+ goto out;
+ }
+
+ fop->int32 = xflags;
+
+ if (loc != NULL)
+ {
+ if (loc_copy(&fop->loc[0], loc) != 0)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");
+
+ goto out;
+ }
+ }
+ if (xdata != NULL)
+ {
+ fop->xdata = dict_ref(xdata);
+ if (fop->xdata == NULL)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
+ "dictionary.");
+
+ goto out;
+ }
+ }
+
+ error = 0;
+
+out:
+ if (fop != NULL)
+ {
+ ec_manager(fop, error);
+ }
+ else
+ {
+ func(frame, NULL, this, -1, EIO, NULL, NULL, NULL);
+ }
+}
diff --git a/xlators/cluster/ec/src/ec-fops.h b/xlators/cluster/ec/src/ec-fops.h
new file mode 100644
index 00000000000..2b6e03f723f
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-fops.h
@@ -0,0 +1,211 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __EC_FOPS_H__
+#define __EC_FOPS_H__
+
+#include "xlator.h"
+
+#include "ec-data.h"
+#include "ec-common.h"
+
+#define EC_FOP_HEAL -1
+#define EC_FOP_FHEAL -2
+
+void ec_access(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_access_cbk_t func, void *data, loc_t * loc,
+ int32_t mask, dict_t * xdata);
+
+void ec_create(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_create_cbk_t func, void *data, loc_t * loc,
+ int32_t flags, mode_t mode, mode_t umask, fd_t * fd,
+ dict_t * xdata);
+
+void ec_entrylk(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_entrylk_cbk_t func, void *data,
+ const char * volume, loc_t * loc, const char * basename,
+ entrylk_cmd cmd, entrylk_type type, dict_t * xdata);
+
+void ec_fentrylk(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_fentrylk_cbk_t func, void *data,
+ const char * volume, fd_t * fd, const char * basename,
+ entrylk_cmd cmd, entrylk_type type, dict_t * xdata);
+
+void ec_flush(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_flush_cbk_t func, void *data, fd_t * fd,
+ dict_t * xdata);
+
+void ec_fsync(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_fsync_cbk_t func, void *data, fd_t * fd,
+ int32_t datasync, dict_t * xdata);
+
+void ec_fsyncdir(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_fsyncdir_cbk_t func, void *data,
+ fd_t * fd, int32_t datasync, dict_t * xdata);
+
+void ec_getxattr(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_getxattr_cbk_t func, void *data,
+ loc_t * loc, const char * name, dict_t * xdata);
+
+void ec_fgetxattr(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_fgetxattr_cbk_t func, void *data,
+ fd_t * fd, const char * name, dict_t * xdata);
+
+void ec_heal(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_heal_cbk_t func, void *data, loc_t * loc,
+ dict_t * xdata);
+
+void ec_fheal(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_fheal_cbk_t func, void *data, fd_t * fd,
+ dict_t * xdata);
+
+void ec_inodelk(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_inodelk_cbk_t func, void *data,
+ const char * volume, loc_t * loc, int32_t cmd,
+ struct gf_flock * flock, dict_t * xdata);
+
+void ec_finodelk(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_finodelk_cbk_t func, void *data,
+ const char * volume, fd_t * fd, int32_t cmd,
+ struct gf_flock * flock, dict_t * xdata);
+
+void ec_link(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_link_cbk_t func, void *data, loc_t * oldloc,
+ loc_t * newloc, dict_t * xdata);
+
+void ec_lk(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_lk_cbk_t func, void *data, fd_t * fd,
+ int32_t cmd, struct gf_flock * flock, dict_t * xdata);
+
+void ec_lookup(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_lookup_cbk_t func, void *data, loc_t * loc,
+ dict_t * xdata);
+
+void ec_mkdir(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_mkdir_cbk_t func, void *data, loc_t * loc,
+ mode_t mode, mode_t umask, dict_t * xdata);
+
+void ec_mknod(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_mknod_cbk_t func, void *data, loc_t * loc,
+ mode_t mode, dev_t rdev, mode_t umask, dict_t * xdata);
+
+void ec_open(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_open_cbk_t func, void *data, loc_t * loc,
+ int32_t flags, fd_t * fd, dict_t * xdata);
+
+void ec_opendir(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_opendir_cbk_t func, void *data,
+ loc_t * loc, fd_t * fd, dict_t * xdata);
+
+void ec_readdir(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_readdir_cbk_t func, void *data, fd_t * fd,
+ size_t size, off_t offset, dict_t * xdata);
+
+void ec_readdirp(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_readdirp_cbk_t func, void *data,
+ fd_t * fd, size_t size, off_t offset, dict_t * xdata);
+
+void ec_readlink(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_readlink_cbk_t func, void *data,
+ loc_t * loc, size_t size, dict_t * xdata);
+
+void ec_readv(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_readv_cbk_t func, void *data, fd_t * fd,
+ size_t size, off_t offset, uint32_t flags, dict_t * xdata);
+
+void ec_removexattr(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_removexattr_cbk_t func, void *data,
+ loc_t * loc, const char * name, dict_t * xdata);
+
+void ec_fremovexattr(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_fremovexattr_cbk_t func, void *data,
+ fd_t * fd, const char * name, dict_t * xdata);
+
+void ec_rename(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_rename_cbk_t func, void *data,
+ loc_t * oldloc, loc_t * newloc, dict_t * xdata);
+
+void ec_rmdir(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_rmdir_cbk_t func, void *data, loc_t * loc,
+ int xflags, dict_t * xdata);
+
+void ec_setattr(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_setattr_cbk_t func, void *data,
+ loc_t * loc, struct iatt * stbuf, int32_t valid,
+ dict_t * xdata);
+
+void ec_fsetattr(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_fsetattr_cbk_t func, void *data,
+ fd_t * fd, struct iatt * stbuf, int32_t valid,
+ dict_t * xdata);
+
+void ec_setxattr(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_setxattr_cbk_t func, void *data,
+ loc_t * loc, dict_t * dict, int32_t flags, dict_t * xdata);
+
+void ec_fsetxattr(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_fsetxattr_cbk_t func, void *data,
+ fd_t * fd, dict_t * dict, int32_t flags, dict_t * xdata);
+
+void ec_stat(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_stat_cbk_t func, void *data, loc_t * loc,
+ dict_t * xdata);
+
+void ec_fstat(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_fstat_cbk_t func, void *data, fd_t * fd,
+ dict_t * xdata);
+
+void ec_statfs(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_statfs_cbk_t func, void *data, loc_t * loc,
+ dict_t * xdata);
+
+void ec_symlink(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_symlink_cbk_t func, void *data,
+ const char * linkname, loc_t * loc, mode_t umask,
+ dict_t * xdata);
+
+void ec_truncate(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_truncate_cbk_t func, void *data,
+ loc_t * loc, off_t offset, dict_t * xdata);
+
+void ec_ftruncate(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_ftruncate_cbk_t func, void *data,
+ fd_t * fd, off_t offset, dict_t * xdata);
+
+void ec_unlink(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_unlink_cbk_t func, void *data, loc_t * loc,
+ int xflags, dict_t * xdata);
+
+void ec_writev(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_writev_cbk_t func, void *data, fd_t * fd,
+ struct iovec * vector, int32_t count, off_t offset,
+ uint32_t flags, struct iobref * iobref, dict_t * xdata);
+
+void ec_xattrop(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_xattrop_cbk_t func, void *data,
+ loc_t * loc, gf_xattrop_flags_t optype, dict_t * xattr,
+ dict_t * xdata);
+
+void ec_fxattrop(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_fxattrop_cbk_t func, void *data,
+ fd_t * fd, gf_xattrop_flags_t optype, dict_t * xattr,
+ dict_t * xdata);
+
+#endif /* __EC_FOPS_H__ */
diff --git a/xlators/cluster/ec/src/ec-generic.c b/xlators/cluster/ec/src/ec-generic.c
new file mode 100644
index 00000000000..49343388934
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-generic.c
@@ -0,0 +1,1660 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include "xlator.h"
+#include "defaults.h"
+
+#include "ec-helpers.h"
+#include "ec-common.h"
+#include "ec-combine.h"
+#include "ec-method.h"
+#include "ec-fops.h"
+
+/* FOP: flush */
+
/* Callback invoked when one subvolume answers a flush request.
 *
 * The answering subvolume's index travels in 'cookie' (packed by
 * ec_wind_flush()). The answer is stored in a new ec_cbk_data_t and merged
 * with matching answers from the other subvolumes through ec_combine().
 * ec_complete() is called on every exit path where 'fop' was recovered,
 * even if allocation or validation failed, so the fop sees this answer as
 * accounted for.
 *
 * Always returns 0 (STACK_WIND callback convention).
 */
int32_t ec_flush_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                     int32_t op_ret, int32_t op_errno, dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    /* Subvolume index recovered from the cookie. */
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_FLUSH, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        /* No fop-specific combine function: only xdata needs merging. */
        ec_combine(cbk, NULL);
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
/* Send the flush request to the subvolume at position 'idx'. The index is
 * passed as the callback cookie so that ec_flush_cbk() can attribute the
 * answer to this subvolume. */
void ec_wind_flush(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
{
    ec_trace("WIND", fop, "idx=%d", idx);

    STACK_WIND_COOKIE(fop->frame, ec_flush_cbk, (void *)(uintptr_t)idx,
                      ec->xl_list[idx], ec->xl_list[idx]->fops->flush, fop->fd,
                      fop->xdata);
}
+
/* State machine driving the flush fop.
 *
 * Called by ec_manager() with the current state; returns the next state.
 * Negative states are the error path (the state whose processing failed,
 * negated); on error the user callback is answered with -1/fop->error
 * before unlocking.
 */
int32_t ec_manager_flush(ec_fop_data_t * fop, int32_t state)
{
    ec_cbk_data_t * cbk;

    switch (state)
    {
        /* INIT and LOCK share the same processing: take the fd lock. */
        case EC_STATE_INIT:
        case EC_STATE_LOCK:
            ec_lock_fd(fop, fop->fd);

            return EC_STATE_DISPATCH;

        case EC_STATE_DISPATCH:
            /* flush must reach every subvolume. */
            ec_dispatch_all(fop);

            return EC_STATE_PREPARE_ANSWER;

        case EC_STATE_PREPARE_ANSWER:
            cbk = fop->answer;
            if (cbk != NULL)
            {
                /* A successful answer whose xdata cannot be combined is
                 * degraded to EIO. */
                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
                {
                    if (cbk->op_ret >= 0)
                    {
                        cbk->op_ret = -1;
                        cbk->op_errno = EIO;
                    }
                }
                if (cbk->op_ret < 0)
                {
                    ec_fop_set_error(fop, cbk->op_errno);
                }
            }
            else
            {
                /* Not enough consistent answers. */
                ec_fop_set_error(fop, EIO);
            }

            return EC_STATE_REPORT;

        case EC_STATE_REPORT:
            cbk = fop->answer;

            GF_ASSERT(cbk != NULL);

            if (fop->cbks.flush != NULL)
            {
                fop->cbks.flush(fop->req_frame, fop, fop->xl, cbk->op_ret,
                                cbk->op_errno, cbk->xdata);
            }

            return EC_STATE_UNLOCK;

        /* Error path: report the failure to the user callback. */
        case -EC_STATE_LOCK:
        case -EC_STATE_DISPATCH:
        case -EC_STATE_PREPARE_ANSWER:
        case -EC_STATE_REPORT:
            GF_ASSERT(fop->error != 0);

            if (fop->cbks.flush != NULL)
            {
                fop->cbks.flush(fop->req_frame, fop, fop->xl, -1, fop->error,
                                NULL);
            }

            return EC_STATE_UNLOCK;

        case -EC_STATE_UNLOCK:
        case EC_STATE_UNLOCK:
            ec_unlock(fop);

            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
/* Entry point of the flush fop.
 *
 * Allocates an ec_fop_data_t describing the request and hands it to
 * ec_manager(), which drives ec_manager_flush(). If setup fails after the
 * fop was allocated, ec_manager() is still invoked with the error so the
 * normal error path answers the caller; if the fop itself could not be
 * allocated, 'func' is answered directly with EIO.
 *
 * 'target'/'minimum' select which subvolumes must be reached and how many
 * answers are required; 'data' is an opaque pointer forwarded to 'func'.
 */
void ec_flush(call_frame_t * frame, xlator_t * this, uintptr_t target,
              int32_t minimum, fop_flush_cbk_t func, void * data, fd_t * fd,
              dict_t * xdata)
{
    ec_cbk_t callback = { .flush = func };
    ec_fop_data_t * fop = NULL;
    /* Assume failure until all resources are referenced. */
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(FLUSH) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_FLUSH, EC_FLAG_UPDATE_FD,
                               target, minimum, ec_wind_flush,
                               ec_manager_flush, callback, data);
    if (fop == NULL)
    {
        goto out;
    }

    if (fd != NULL)
    {
        fop->fd = fd_ref(fd);
        if (fop->fd == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "file descriptor.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL);
    }
}
+
+/* FOP: fsync */
+
+int32_t ec_combine_fsync(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (!ec_iatt_combine(dst->iatt, src->iatt, 2))
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching iatt in "
+ "answers of 'GF_FOP_FSYNC'");
+
+ return 0;
+ }
+
+ return 1;
+}
+
/* Callback invoked when one subvolume answers an fsync request.
 *
 * On success the pre/post iatts are copied into the answer; the answer is
 * then merged with others via ec_combine() using ec_combine_fsync() to
 * validate iatt consistency. ec_complete() runs on every exit path where
 * 'fop' was recovered.
 */
int32_t ec_fsync_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                     int32_t op_ret, int32_t op_errno, struct iatt * prebuf,
                     struct iatt * postbuf, dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    /* Subvolume index recovered from the cookie. */
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_FSYNC, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (op_ret >= 0)
        {
            if (prebuf != NULL)
            {
                cbk->iatt[0] = *prebuf;
            }
            if (postbuf != NULL)
            {
                cbk->iatt[1] = *postbuf;
            }
        }
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        ec_combine(cbk, ec_combine_fsync);
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
/* Send the fsync request to the subvolume at position 'idx'. fop->int32
 * carries the 'datasync' flag given to ec_fsync(). */
void ec_wind_fsync(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
{
    ec_trace("WIND", fop, "idx=%d", idx);

    STACK_WIND_COOKIE(fop->frame, ec_fsync_cbk, (void *)(uintptr_t)idx,
                      ec->xl_list[idx], ec->xl_list[idx]->fops->fsync, fop->fd,
                      fop->int32, fop->xdata);
}
+
/* State machine driving the fsync fop.
 *
 * Like ec_manager_flush() but with an extra GET_SIZE_AND_VERSION step and
 * rebuilt iatts: the bricks report fragment sizes, so on success the iatt
 * sizes are replaced with the file-level sizes tracked in the fop.
 * Negative states are the error path.
 */
int32_t ec_manager_fsync(ec_fop_data_t * fop, int32_t state)
{
    ec_cbk_data_t * cbk;

    switch (state)
    {
        case EC_STATE_INIT:
        case EC_STATE_LOCK:
            ec_lock_fd(fop, fop->fd);

            return EC_STATE_GET_SIZE_AND_VERSION;

        case EC_STATE_GET_SIZE_AND_VERSION:
            /* Needed to report correct file-level sizes below. */
            ec_get_size_version(fop);

            return EC_STATE_DISPATCH;

        case EC_STATE_DISPATCH:
            ec_dispatch_all(fop);

            return EC_STATE_PREPARE_ANSWER;

        case EC_STATE_PREPARE_ANSWER:
            cbk = fop->answer;
            if (cbk != NULL)
            {
                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
                {
                    if (cbk->op_ret >= 0)
                    {
                        cbk->op_ret = -1;
                        cbk->op_errno = EIO;
                    }
                }
                if (cbk->op_ret < 0)
                {
                    ec_fop_set_error(fop, cbk->op_errno);
                }
                else
                {
                    ec_iatt_rebuild(fop->xl->private, cbk->iatt, 2,
                                    cbk->count);

                    /* Replace fragment sizes with whole-file sizes. */
                    cbk->iatt[0].ia_size = fop->pre_size;
                    cbk->iatt[1].ia_size = fop->post_size;
                }
            }
            else
            {
                ec_fop_set_error(fop, EIO);
            }

            return EC_STATE_REPORT;

        case EC_STATE_REPORT:
            cbk = fop->answer;

            GF_ASSERT(cbk != NULL);

            if (fop->cbks.fsync != NULL)
            {
                fop->cbks.fsync(fop->req_frame, fop, fop->xl, cbk->op_ret,
                                cbk->op_errno, &cbk->iatt[0], &cbk->iatt[1],
                                cbk->xdata);
            }

            return EC_STATE_UNLOCK;

        /* Error path: report the failure to the user callback. */
        case -EC_STATE_LOCK:
        case -EC_STATE_GET_SIZE_AND_VERSION:
        case -EC_STATE_DISPATCH:
        case -EC_STATE_PREPARE_ANSWER:
        case -EC_STATE_REPORT:
            GF_ASSERT(fop->error != 0);

            if (fop->cbks.fsync != NULL)
            {
                fop->cbks.fsync(fop->req_frame, fop, fop->xl, -1, fop->error,
                                NULL, NULL, NULL);
            }

            return EC_STATE_UNLOCK;

        case -EC_STATE_UNLOCK:
        case EC_STATE_UNLOCK:
            ec_unlock(fop);

            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
/* Entry point of the fsync fop.
 *
 * Builds the fop (storing 'datasync' in fop->int32), references fd/xdata
 * and hands control to ec_manager(). On allocation failure 'func' is
 * answered directly with EIO.
 */
void ec_fsync(call_frame_t * frame, xlator_t * this, uintptr_t target,
              int32_t minimum, fop_fsync_cbk_t func, void * data, fd_t * fd,
              int32_t datasync, dict_t * xdata)
{
    ec_cbk_t callback = { .fsync = func };
    ec_fop_data_t * fop = NULL;
    /* Assume failure until all resources are referenced. */
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(FSYNC) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_FSYNC, EC_FLAG_UPDATE_FD,
                               target, minimum, ec_wind_fsync,
                               ec_manager_fsync, callback, data);
    if (fop == NULL)
    {
        goto out;
    }

    fop->int32 = datasync;

    if (fd != NULL)
    {
        fop->fd = fd_ref(fd);
        if (fop->fd == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "file descriptor.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL, NULL, NULL);
    }
}
+
+/* FOP: fsyncdir */
+
/* Callback invoked when one subvolume answers an fsyncdir request.
 * Structurally identical to ec_flush_cbk(): only xdata is carried, no
 * fop-specific combine function is needed. */
int32_t ec_fsyncdir_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                        int32_t op_ret, int32_t op_errno, dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    /* Subvolume index recovered from the cookie. */
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_FSYNCDIR, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        ec_combine(cbk, NULL);
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
/* Send the fsyncdir request to the subvolume at position 'idx'. fop->int32
 * carries the 'datasync' flag given to ec_fsyncdir(). */
void ec_wind_fsyncdir(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
{
    ec_trace("WIND", fop, "idx=%d", idx);

    STACK_WIND_COOKIE(fop->frame, ec_fsyncdir_cbk, (void *)(uintptr_t)idx,
                      ec->xl_list[idx], ec->xl_list[idx]->fops->fsyncdir,
                      fop->fd, fop->int32, fop->xdata);
}
+
/* State machine driving the fsyncdir fop. Same shape as
 * ec_manager_flush(): lock the fd, dispatch to all subvolumes, combine
 * xdata and report. Negative states are the error path. */
int32_t ec_manager_fsyncdir(ec_fop_data_t * fop, int32_t state)
{
    ec_cbk_data_t * cbk;

    switch (state)
    {
        case EC_STATE_INIT:
        case EC_STATE_LOCK:
            ec_lock_fd(fop, fop->fd);

            return EC_STATE_DISPATCH;

        case EC_STATE_DISPATCH:
            ec_dispatch_all(fop);

            return EC_STATE_PREPARE_ANSWER;

        case EC_STATE_PREPARE_ANSWER:
            cbk = fop->answer;
            if (cbk != NULL)
            {
                /* A successful answer whose xdata cannot be combined is
                 * degraded to EIO. */
                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
                {
                    if (cbk->op_ret >= 0)
                    {
                        cbk->op_ret = -1;
                        cbk->op_errno = EIO;
                    }
                }
                if (cbk->op_ret < 0)
                {
                    ec_fop_set_error(fop, cbk->op_errno);
                }
            }
            else
            {
                ec_fop_set_error(fop, EIO);
            }

            return EC_STATE_REPORT;

        case EC_STATE_REPORT:
            cbk = fop->answer;

            GF_ASSERT(cbk != NULL);

            if (fop->cbks.fsyncdir != NULL)
            {
                fop->cbks.fsyncdir(fop->req_frame, fop, fop->xl, cbk->op_ret,
                                   cbk->op_errno, cbk->xdata);
            }

            return EC_STATE_UNLOCK;

        /* Error path: report the failure to the user callback. */
        case -EC_STATE_LOCK:
        case -EC_STATE_DISPATCH:
        case -EC_STATE_PREPARE_ANSWER:
        case -EC_STATE_REPORT:
            GF_ASSERT(fop->error != 0);

            if (fop->cbks.fsyncdir != NULL)
            {
                fop->cbks.fsyncdir(fop->req_frame, fop, fop->xl, -1,
                                   fop->error, NULL);
            }

            return EC_STATE_UNLOCK;

        case -EC_STATE_UNLOCK:
        case EC_STATE_UNLOCK:
            ec_unlock(fop);

            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
/* Entry point of the fsyncdir fop. Mirrors ec_fsync(): stores 'datasync'
 * in fop->int32, references fd/xdata and hands control to ec_manager().
 * On allocation failure 'func' is answered directly with EIO. */
void ec_fsyncdir(call_frame_t * frame, xlator_t * this, uintptr_t target,
                 int32_t minimum, fop_fsyncdir_cbk_t func, void * data,
                 fd_t * fd, int32_t datasync, dict_t * xdata)
{
    ec_cbk_t callback = { .fsyncdir = func };
    ec_fop_data_t * fop = NULL;
    /* Assume failure until all resources are referenced. */
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(FSYNCDIR) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_FSYNCDIR, EC_FLAG_UPDATE_FD,
                               target, minimum, ec_wind_fsyncdir,
                               ec_manager_fsyncdir, callback, data);
    if (fop == NULL)
    {
        goto out;
    }

    fop->int32 = datasync;

    if (fd != NULL)
    {
        fop->fd = fd_ref(fd);
        if (fop->fd == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "file descriptor.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL);
    }
}
+
+/* FOP: lookup */
+
+void ec_lookup_rebuild(ec_t * ec, ec_fop_data_t * fop, ec_cbk_data_t * cbk)
+{
+ ec_cbk_data_t * ans = NULL;
+ data_t * data = NULL;
+ uint8_t * ptr = NULL, * buff = NULL, * tmp = NULL;
+ size_t size = 0;
+ int32_t i = 0;
+
+ if (cbk->op_ret < 0)
+ {
+ return;
+ }
+
+ ec_dict_del_number(cbk->xdata, EC_XATTR_VERSION, &cbk->version);
+
+ ec_loc_prepare(fop->xl, &fop->loc[0], cbk->inode, &cbk->iatt[0]);
+
+ if (cbk->iatt[0].ia_type == IA_IFREG)
+ {
+ uint8_t * blocks[cbk->count];
+ uint8_t * ptrs[cbk->count];
+ uint32_t values[cbk->count];
+
+ cbk->size = cbk->iatt[0].ia_size;
+ ec_dict_del_number(cbk->xdata, EC_XATTR_SIZE, &cbk->iatt[0].ia_size);
+
+ size = SIZE_MAX;
+ for (i = 0, ans = cbk; (ans != NULL) && (i < ec->fragments);
+ ans = ans->next)
+ {
+ data = dict_get(ans->xdata, GF_CONTENT_KEY);
+ if (data != NULL)
+ {
+ values[i] = ans->idx;
+ ptrs[i] = GF_MALLOC(data->len + EC_BUFFER_ALIGN_SIZE - 1,
+ gf_common_mt_char);
+ if (ptrs[i] == NULL)
+ {
+ continue;
+ }
+
+ if (size > data->len)
+ {
+ size = data->len;
+ }
+ blocks[i] = GF_ALIGN_BUF(ptrs[i], EC_BUFFER_ALIGN_SIZE);
+ memcpy(blocks[i], data->data, size);
+
+ i++;
+ }
+ }
+
+ dict_del(cbk->xdata, GF_CONTENT_KEY);
+
+ if (i >= ec->fragments)
+ {
+ size -= size % ec->fragment_size;
+ if (size > 0)
+ {
+ ptr = GF_MALLOC(size * ec->fragments +
+ EC_BUFFER_ALIGN_SIZE - 1,
+ gf_common_mt_char);
+ if (ptr != NULL)
+ {
+ buff = GF_ALIGN_BUF(ptr, EC_BUFFER_ALIGN_SIZE);
+
+ size = ec_method_decode(size, ec->fragments, values,
+ blocks, buff);
+ if (size > fop->size)
+ {
+ size = fop->size;
+ }
+ if (size > cbk->iatt[0].ia_size)
+ {
+ size = cbk->iatt[0].ia_size;
+ }
+
+ tmp = GF_MALLOC(size, gf_common_mt_char);
+ if (tmp != NULL)
+ {
+ memcpy(tmp, buff, size);
+ if (dict_set_bin(cbk->xdata, GF_CONTENT_KEY, tmp,
+ size) != 0)
+ {
+ GF_FREE(tmp);
+
+ gf_log(fop->xl->name, GF_LOG_WARNING, "Lookup "
+ "read-ahead "
+ "failed");
+ }
+ }
+
+ GF_FREE(ptr);
+ }
+ else
+ {
+ gf_log(fop->xl->name, GF_LOG_WARNING, "Lookup read-ahead "
+ "failed");
+ }
+ }
+ }
+ while (--i > 0)
+ {
+ GF_FREE(ptrs[i]);
+ }
+ }
+}
+
+int32_t ec_combine_lookup(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (!ec_iatt_combine(dst->iatt, src->iatt, 2))
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching iatt in "
+ "answers of 'GF_FOP_LOOKUP'");
+
+ return 0;
+ }
+
+ return 1;
+}
+
/* Callback invoked when one subvolume answers a lookup request.
 *
 * On success the inode is referenced and the entry/postparent iatts are
 * copied into the answer; answers are merged via ec_combine() using
 * ec_combine_lookup(). ec_complete() runs on every exit path where 'fop'
 * was recovered.
 */
int32_t ec_lookup_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                      int32_t op_ret, int32_t op_errno, inode_t * inode,
                      struct iatt * buf, dict_t * xdata,
                      struct iatt * postparent)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    /* Subvolume index recovered from the cookie. */
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_LOOKUP, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (op_ret >= 0)
        {
            if (inode != NULL)
            {
                cbk->inode = inode_ref(inode);
                if (cbk->inode == NULL)
                {
                    gf_log(this->name, GF_LOG_ERROR,
                           "Failed to reference an inode.");

                    goto out;
                }
            }
            if (buf != NULL)
            {
                cbk->iatt[0] = *buf;
            }
            if (postparent != NULL)
            {
                cbk->iatt[1] = *postparent;
            }
        }
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        ec_combine(cbk, ec_combine_lookup);
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
/* Send the lookup request to the subvolume at position 'idx'. */
void ec_wind_lookup(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
{
    ec_trace("WIND", fop, "idx=%d", idx);

    STACK_WIND_COOKIE(fop->frame, ec_lookup_cbk, (void *)(uintptr_t)idx,
                      ec->xl_list[idx], ec->xl_list[idx]->fops->lookup,
                      &fop->loc[0], fop->xdata);
}
+
/* State machine driving the lookup fop.
 *
 * INIT prepares the xdata request: it asks every subvolume for the size
 * and version xattrs and, if the caller requested read-ahead content
 * (GF_CONTENT_KEY), scales the requested size to fragment size. Lookup
 * takes no locks, so REPORT ends the fop directly. Negative states are
 * the error path.
 */
int32_t ec_manager_lookup(ec_fop_data_t * fop, int32_t state)
{
    ec_cbk_data_t * cbk;

    switch (state)
    {
        case EC_STATE_INIT:
            if (fop->xdata == NULL)
            {
                fop->xdata = dict_new();
                if (fop->xdata == NULL)
                {
                    gf_log(fop->xl->name, GF_LOG_ERROR, "Unable to prepare "
                                                        "lookup request");

                    fop->error = EIO;

                    return EC_STATE_REPORT;
                }
            }
            else
            {
                uint64_t size;

                /* Caller asked for read-ahead content: remember the
                 * requested size and convert it to per-fragment size. */
                if (dict_get_uint64(fop->xdata, GF_CONTENT_KEY, &size) == 0)
                {
                    fop->size = size;
                    size = ec_adjust_size(fop->xl->private, size, 1);
                    if (dict_set_uint64(fop->xdata, GF_CONTENT_KEY, size) != 0)
                    {
                        gf_log("ec", GF_LOG_DEBUG, "Unable to update lookup "
                                                   "content size");
                    }
                }
            }
            /* Request the size and version xattrs from each subvolume. */
            if ((dict_set_uint64(fop->xdata, EC_XATTR_SIZE, 0) != 0) ||
                (dict_set_uint64(fop->xdata, EC_XATTR_VERSION, 0) != 0))
            {
                gf_log(fop->xl->name, GF_LOG_ERROR, "Unable to prepare lookup "
                                                    "request");

                fop->error = EIO;

                return EC_STATE_REPORT;
            }

        /* Fall through: dispatch immediately after INIT. */
        case EC_STATE_DISPATCH:
            ec_dispatch_all(fop);

            return EC_STATE_PREPARE_ANSWER;

        case EC_STATE_PREPARE_ANSWER:
            cbk = fop->answer;
            if (cbk != NULL)
            {
                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
                {
                    if (cbk->op_ret >= 0)
                    {
                        cbk->op_ret = -1;
                        cbk->op_errno = EIO;
                    }
                }
                if (cbk->op_ret < 0)
                {
                    ec_fop_set_error(fop, cbk->op_errno);
                }
                else
                {
                    ec_iatt_rebuild(fop->xl->private, cbk->iatt, 2,
                                    cbk->count);

                    /* Recover real file size and decode read-ahead data. */
                    ec_lookup_rebuild(fop->xl->private, fop, cbk);
                }
            }
            else
            {
                ec_fop_set_error(fop, EIO);
            }

            return EC_STATE_REPORT;

        case EC_STATE_REPORT:
            cbk = fop->answer;

            GF_ASSERT(cbk != NULL);

            if (fop->cbks.lookup != NULL)
            {
                fop->cbks.lookup(fop->req_frame, fop, fop->xl, cbk->op_ret,
                                 cbk->op_errno, cbk->inode, &cbk->iatt[0],
                                 cbk->xdata, &cbk->iatt[1]);
            }

            return EC_STATE_END;

        /* Error path: report the failure to the user callback. */
        case -EC_STATE_DISPATCH:
        case -EC_STATE_PREPARE_ANSWER:
        case -EC_STATE_REPORT:
            GF_ASSERT(fop->error != 0);

            if (fop->cbks.lookup != NULL)
            {
                fop->cbks.lookup(fop->req_frame, fop, fop->xl, -1, fop->error,
                                 NULL, NULL, NULL, NULL);
            }

            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
/* Entry point of the lookup fop.
 *
 * Copies the location, references xdata and hands control to ec_manager(),
 * which drives ec_manager_lookup(). On allocation failure 'func' is
 * answered directly with EIO.
 */
void ec_lookup(call_frame_t * frame, xlator_t * this, uintptr_t target,
               int32_t minimum, fop_lookup_cbk_t func, void * data,
               loc_t * loc, dict_t * xdata)
{
    ec_cbk_t callback = { .lookup = func };
    ec_fop_data_t * fop = NULL;
    /* Assume failure until all resources are referenced. */
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(LOOKUP) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_LOOKUP, 0, target, minimum,
                               ec_wind_lookup, ec_manager_lookup, callback,
                               data);
    if (fop == NULL)
    {
        goto out;
    }

    if (loc != NULL)
    {
        if (loc_copy(&fop->loc[0], loc) != 0)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL, NULL, NULL, NULL);
    }
}
+
+/* FOP: statfs */
+
/* Merge two statfs answers. statvfs figures are aggregated (never
 * considered mismatching), so this always reports success (1). */
int32_t ec_combine_statfs(ec_fop_data_t * fop, ec_cbk_data_t * dst,
                          ec_cbk_data_t * src)
{
    ec_statvfs_combine(&dst->statvfs, &src->statvfs);

    return 1;
}
+
/* Callback invoked when one subvolume answers a statfs request. The
 * statvfs data is copied on success and merged with other answers via
 * ec_combine_statfs(). ec_complete() runs on every exit path where 'fop'
 * was recovered. */
int32_t ec_statfs_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                      int32_t op_ret, int32_t op_errno, struct statvfs * buf,
                      dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    /* Subvolume index recovered from the cookie. */
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_STATFS, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (op_ret >= 0)
        {
            if (buf != NULL)
            {
                cbk->statvfs = *buf;
            }
        }
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        ec_combine(cbk, ec_combine_statfs);
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
/* Send the statfs request to the subvolume at position 'idx'. */
void ec_wind_statfs(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
{
    ec_trace("WIND", fop, "idx=%d", idx);

    STACK_WIND_COOKIE(fop->frame, ec_statfs_cbk, (void *)(uintptr_t)idx,
                      ec->xl_list[idx], ec->xl_list[idx]->fops->statfs,
                      &fop->loc[0], fop->xdata);
}
+
/* State machine driving the statfs fop.
 *
 * statfs takes no locks: INIT dispatches directly and REPORT ends the
 * fop. On success the block counts, which each brick reports in terms of
 * its own fragments, are scaled by the number of data fragments to give
 * file-level figures. Negative states are the error path.
 */
int32_t ec_manager_statfs(ec_fop_data_t * fop, int32_t state)
{
    ec_cbk_data_t * cbk;

    switch (state)
    {
        case EC_STATE_INIT:
        case EC_STATE_DISPATCH:
            ec_dispatch_all(fop);

            return EC_STATE_PREPARE_ANSWER;

        case EC_STATE_PREPARE_ANSWER:
            cbk = fop->answer;
            if (cbk != NULL)
            {
                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
                {
                    if (cbk->op_ret >= 0)
                    {
                        cbk->op_ret = -1;
                        cbk->op_errno = EIO;
                    }
                }
                if (cbk->op_ret < 0)
                {
                    ec_fop_set_error(fop, cbk->op_errno);
                }
                else
                {
                    ec_t * ec = fop->xl->private;

                    /* Scale fragment-based counts to file-level counts. */
                    cbk->statvfs.f_blocks *= ec->fragments;
                    cbk->statvfs.f_bfree *= ec->fragments;
                    cbk->statvfs.f_bavail *= ec->fragments;
                }
            }
            else
            {
                ec_fop_set_error(fop, EIO);
            }

            return EC_STATE_REPORT;

        case EC_STATE_REPORT:
            cbk = fop->answer;

            GF_ASSERT(cbk != NULL);

            if (fop->cbks.statfs != NULL)
            {
                fop->cbks.statfs(fop->req_frame, fop, fop->xl, cbk->op_ret,
                                 cbk->op_errno, &cbk->statvfs, cbk->xdata);
            }

            return EC_STATE_END;

        /* Error path: report the failure to the user callback. */
        case -EC_STATE_DISPATCH:
        case -EC_STATE_PREPARE_ANSWER:
        case -EC_STATE_REPORT:
            GF_ASSERT(fop->error != 0);

            if (fop->cbks.statfs != NULL)
            {
                fop->cbks.statfs(fop->req_frame, fop, fop->xl, -1, fop->error,
                                 NULL, NULL);
            }

            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
/* Entry point of the statfs fop. Copies the location, references xdata
 * and hands control to ec_manager(). On allocation failure 'func' is
 * answered directly with EIO. */
void ec_statfs(call_frame_t * frame, xlator_t * this, uintptr_t target,
               int32_t minimum, fop_statfs_cbk_t func, void * data,
               loc_t * loc, dict_t * xdata)
{
    ec_cbk_t callback = { .statfs = func };
    ec_fop_data_t * fop = NULL;
    /* Assume failure until all resources are referenced. */
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(STATFS) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_STATFS, 0, target, minimum,
                               ec_wind_statfs, ec_manager_statfs, callback,
                               data);
    if (fop == NULL)
    {
        goto out;
    }

    if (loc != NULL)
    {
        if (loc_copy(&fop->loc[0], loc) != 0)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL, NULL);
    }
}
+
+/* FOP: xattrop */
+
+int32_t ec_combine_xattrop(ec_fop_data_t * fop, ec_cbk_data_t * dst,
+ ec_cbk_data_t * src)
+{
+ if (!ec_dict_compare(dst->dict, src->dict))
+ {
+ gf_log(fop->xl->name, GF_LOG_NOTICE, "Mismatching dictionary in "
+ "answers of 'GF_FOP_XATTROP'");
+
+ return 0;
+ }
+
+ return 1;
+}
+
/* Callback invoked when one subvolume answers an xattrop request. The
 * resulting xattr dictionary is referenced on success and answers are
 * merged via ec_combine_xattrop(). ec_complete() runs on every exit path
 * where 'fop' was recovered. */
int32_t ec_xattrop_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                       int32_t op_ret, int32_t op_errno, dict_t * xattr,
                       dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    /* Subvolume index recovered from the cookie. */
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_XATTROP, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (op_ret >= 0)
        {
            if (xattr != NULL)
            {
                cbk->dict = dict_ref(xattr);
                if (cbk->dict == NULL)
                {
                    gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                     "dictionary.");

                    goto out;
                }
            }
        }
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        ec_combine(cbk, ec_combine_xattrop);
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
/* Send the xattrop request to the subvolume at position 'idx'. */
void ec_wind_xattrop(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
{
    ec_trace("WIND", fop, "idx=%d", idx);

    STACK_WIND_COOKIE(fop->frame, ec_xattrop_cbk, (void *)(uintptr_t)idx,
                      ec->xl_list[idx], ec->xl_list[idx]->fops->xattrop,
                      &fop->loc[0], fop->xattrop_flags, fop->dict, fop->xdata);
}
+
/* State machine shared by the xattrop and fxattrop fops (ec_fxattrop()
 * installs this same manager). fop->fd distinguishes the two when
 * reporting: NULL means xattrop (loc-based), non-NULL means fxattrop.
 * On a successful answer, the size/version xattrs are updated before
 * unlocking. Negative states are the error path. */
int32_t ec_manager_xattrop(ec_fop_data_t * fop, int32_t state)
{
    ec_cbk_data_t * cbk;

    switch (state)
    {
        case EC_STATE_INIT:
        case EC_STATE_LOCK:
            /* NOTE(review): the inode lock is taken on loc[0] even for
             * fxattrop; presumably loc[0] was filled from the fd -
             * confirm against ec_lock_inode()/ec_fop_data_allocate(). */
            ec_lock_inode(fop, &fop->loc[0]);

            return EC_STATE_DISPATCH;

        case EC_STATE_DISPATCH:
            ec_dispatch_all(fop);

            return EC_STATE_PREPARE_ANSWER;

        case EC_STATE_PREPARE_ANSWER:
            cbk = fop->answer;
            if (cbk != NULL)
            {
                /* Both xdata and the xattr result dictionary must be
                 * combinable; otherwise the answer degrades to EIO. */
                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA) ||
                    ((cbk->op_ret >= 0) && !ec_dict_combine(cbk,
                                                            EC_COMBINE_DICT)))
                {
                    if (cbk->op_ret >= 0)
                    {
                        cbk->op_ret = -1;
                        cbk->op_errno = EIO;
                    }
                }
                if (cbk->op_ret < 0)
                {
                    ec_fop_set_error(fop, cbk->op_errno);
                }
            }
            else
            {
                ec_fop_set_error(fop, EIO);
            }

            return EC_STATE_REPORT;

        case EC_STATE_REPORT:
            cbk = fop->answer;

            GF_ASSERT(cbk != NULL);

            /* fop->fd tells apart xattrop (NULL) from fxattrop. */
            if (fop->fd == NULL)
            {
                if (fop->cbks.xattrop != NULL)
                {
                    fop->cbks.xattrop(fop->req_frame, fop, fop->xl,
                                      cbk->op_ret, cbk->op_errno, cbk->dict,
                                      cbk->xdata);
                }
            }
            else
            {
                if (fop->cbks.fxattrop != NULL)
                {
                    fop->cbks.fxattrop(fop->req_frame, fop, fop->xl,
                                       cbk->op_ret, cbk->op_errno, cbk->dict,
                                       cbk->xdata);
                }
            }

            /* Only successful operations update size/version xattrs. */
            if (cbk->op_ret >= 0)
            {
                return EC_STATE_UPDATE_SIZE_AND_VERSION;
            }
            return EC_STATE_UNLOCK;

        /* Error path: report the failure to the user callback. */
        case -EC_STATE_LOCK:
        case -EC_STATE_DISPATCH:
        case -EC_STATE_PREPARE_ANSWER:
        case -EC_STATE_REPORT:
            GF_ASSERT(fop->error != 0);

            if (fop->fd == NULL)
            {
                if (fop->cbks.xattrop != NULL)
                {
                    fop->cbks.xattrop(fop->req_frame, fop, fop->xl, -1,
                                      fop->error, NULL, NULL);
                }
            }
            else
            {
                if (fop->cbks.fxattrop != NULL)
                {
                    fop->cbks.fxattrop(fop->req_frame, fop, fop->xl, -1,
                                       fop->error, NULL, NULL);
                }
            }

            return EC_STATE_UNLOCK;

        case EC_STATE_UPDATE_SIZE_AND_VERSION:
            ec_update_size_version(fop);

            return EC_STATE_UNLOCK;

        case -EC_STATE_UPDATE_SIZE_AND_VERSION:
        case -EC_STATE_UNLOCK:
        case EC_STATE_UNLOCK:
            ec_unlock(fop);

            return EC_STATE_END;

        default:
            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
                   state, ec_fop_name(fop->id));

            return EC_STATE_END;
    }
}
+
/* Entry point of the xattrop fop.
 *
 * Stores the operation type in fop->xattrop_flags, copies the location
 * and references the xattr/xdata dictionaries before handing control to
 * ec_manager() (driving ec_manager_xattrop()). On allocation failure
 * 'func' is answered directly with EIO.
 */
void ec_xattrop(call_frame_t * frame, xlator_t * this, uintptr_t target,
                int32_t minimum, fop_xattrop_cbk_t func, void * data,
                loc_t * loc, gf_xattrop_flags_t optype, dict_t * xattr,
                dict_t * xdata)
{
    ec_cbk_t callback = { .xattrop = func };
    ec_fop_data_t * fop = NULL;
    /* Assume failure until all resources are referenced. */
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(XATTROP) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_XATTROP,
                               EC_FLAG_UPDATE_LOC_INODE, target, minimum,
                               ec_wind_xattrop, ec_manager_xattrop, callback,
                               data);
    if (fop == NULL)
    {
        goto out;
    }

    fop->xattrop_flags = optype;

    if (loc != NULL)
    {
        if (loc_copy(&fop->loc[0], loc) != 0)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");

            goto out;
        }
    }
    if (xattr != NULL)
    {
        fop->dict = dict_ref(xattr);
        if (fop->dict == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL, NULL);
    }
}
+
+/* FOP: fxattrop */
+
/* Callback invoked when one subvolume answers an fxattrop request.
 * Identical in structure to ec_xattrop_cbk() (answers are merged with
 * ec_combine_xattrop()), but records the answer under GF_FOP_FXATTROP. */
int32_t ec_fxattrop_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
                        int32_t op_ret, int32_t op_errno, dict_t * xattr,
                        dict_t * xdata)
{
    ec_fop_data_t * fop = NULL;
    ec_cbk_data_t * cbk = NULL;
    /* Subvolume index recovered from the cookie. */
    int32_t idx = (int32_t)(uintptr_t)cookie;

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = frame->local;

    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
             frame, op_ret, op_errno);

    cbk = ec_cbk_data_allocate(frame, this, fop, GF_FOP_FXATTROP, idx, op_ret,
                               op_errno);
    if (cbk != NULL)
    {
        if (op_ret >= 0)
        {
            if (xattr != NULL)
            {
                cbk->dict = dict_ref(xattr);
                if (cbk->dict == NULL)
                {
                    gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                     "dictionary.");

                    goto out;
                }
            }
        }
        if (xdata != NULL)
        {
            cbk->xdata = dict_ref(xdata);
            if (cbk->xdata == NULL)
            {
                gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                                 "dictionary.");

                goto out;
            }
        }

        ec_combine(cbk, ec_combine_xattrop);
    }

out:
    if (fop != NULL)
    {
        ec_complete(fop);
    }

    return 0;
}
+
/* Send the fxattrop request to the subvolume at position 'idx'. */
void ec_wind_fxattrop(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
{
    ec_trace("WIND", fop, "idx=%d", idx);

    STACK_WIND_COOKIE(fop->frame, ec_fxattrop_cbk, (void *)(uintptr_t)idx,
                      ec->xl_list[idx], ec->xl_list[idx]->fops->fxattrop,
                      fop->fd, fop->xattrop_flags, fop->dict, fop->xdata);
}
+
/* Entry point of the fxattrop fop.
 *
 * Shares ec_manager_xattrop() with the loc-based xattrop; the manager
 * distinguishes the two through fop->fd. References fd and the
 * xattr/xdata dictionaries before handing control to ec_manager(). On
 * allocation failure 'func' is answered directly with EIO.
 */
void ec_fxattrop(call_frame_t * frame, xlator_t * this, uintptr_t target,
                 int32_t minimum, fop_fxattrop_cbk_t func, void * data,
                 fd_t * fd, gf_xattrop_flags_t optype, dict_t * xattr,
                 dict_t * xdata)
{
    ec_cbk_t callback = { .fxattrop = func };
    ec_fop_data_t * fop = NULL;
    /* Assume failure until all resources are referenced. */
    int32_t error = EIO;

    gf_log("ec", GF_LOG_TRACE, "EC(FXATTROP) %p", frame);

    VALIDATE_OR_GOTO(this, out);
    GF_VALIDATE_OR_GOTO(this->name, frame, out);
    GF_VALIDATE_OR_GOTO(this->name, this->private, out);

    fop = ec_fop_data_allocate(frame, this, GF_FOP_FXATTROP,
                               EC_FLAG_UPDATE_FD_INODE, target, minimum,
                               ec_wind_fxattrop, ec_manager_xattrop, callback,
                               data);
    if (fop == NULL)
    {
        goto out;
    }

    fop->xattrop_flags = optype;

    if (fd != NULL)
    {
        fop->fd = fd_ref(fd);
        if (fop->fd == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "file descriptor.");

            goto out;
        }
    }
    if (xattr != NULL)
    {
        fop->dict = dict_ref(xattr);
        if (fop->dict == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }
    if (xdata != NULL)
    {
        fop->xdata = dict_ref(xdata);
        if (fop->xdata == NULL)
        {
            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
                                             "dictionary.");

            goto out;
        }
    }

    error = 0;

out:
    if (fop != NULL)
    {
        ec_manager(fop, error);
    }
    else
    {
        func(frame, NULL, this, -1, EIO, NULL, NULL);
    }
}
diff --git a/xlators/cluster/ec/src/ec-gf.c b/xlators/cluster/ec/src/ec-gf.c
new file mode 100644
index 00000000000..03c4818c0cc
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-gf.c
@@ -0,0 +1,10120 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+/*
+ * File automatically generated on Thu Jan 26 12:08:19 2012
+ *
+ * DO NOT MODIFY
+ *
+ * Multiplications in a GF(2^8) with modulus 0x11D using XOR's
+ *
+ * 7994 total xor's
+ * 31.3 average xor's per number
+ * 0 xor's for the best case (01)
+ * 43 xor's for the worst case (F4)
+ *
+ * 0 xor's: 01
+ * 10 xor's: 03
+ * 12 xor's: F5
+ * 16 xor's: 04 05
+ * 17 xor's: 9C A6
+ * 18 xor's: 02 73
+ * 19 xor's: 10 39
+ * 20 xor's: 0B
+ * 21 xor's: 0D 59 D2 E9 EC
+ * 22 xor's: 12 28 61
+ * 23 xor's: 08 09 44
+ * 24 xor's: 0A 1D 25 55 B4
+ * 25 xor's: 07 11 21 51 63 C4
+ * 26 xor's: 0C 0F 13 45 54 5E 64 BD F2
+ * 27 xor's: 06 1F 22 41 6B B9 C7 D1 F7
+ * 28 xor's: 19 31 8C 95 B5 C1 F3
+ * 29 xor's: 26 30 42 4A 4B 50 6A 88 90 A3 D8 E0 E8 F0 FD
+ * 30 xor's: 14 15 20 2E 34 5D 89 99 A2 A9 B0 E5 F9
+ * 31 xor's: 16 17 18 1A 1B 24 29 2B 2D 3B 57 84 85 87 8F 97 A5 EB F1 FB
+ * 32 xor's: 33 36 43 47 65 67 72 75 78 79 81 83 8D 9B A8 AF B8 BB C5 CB CC CE E6 ED
+ * 33 xor's: 0E 35 3D 49 4C 4D 6E 70 94 98 A0 AB B1 B2 B6 C8 C9 CD D0 D6 DC DD E3 EA F8
+ * 34 xor's: 1C 1E 23 27 2C 32 40 46 5C 60 68 6F 71 7F 8A 9A AA AC B3 C2 D3 FC FF
+ * 35 xor's: 3A 53 58 6D 74 7C 7D 8B 91 93 96 A1 AE C0 CA D5 DB E4 F6
+ * 36 xor's: 2A 2F 38 48 4F 5B 66 6C 82 86 92 9F AD BC CF D4 DA DE E2 FA FE
+ * 37 xor's: 37 3E 52 69 7B 9D B7 BE C3 C6 EE
+ * 38 xor's: 3C 5A 7E 80 9E A7 BA BF D7 E7 EF
+ * 39 xor's: 3F 4E 77 8E A4 D9 E1
+ * 40 xor's: 76 7A
+ * 41 xor's: 62
+ * 42 xor's: 56 5F DF
+ * 43 xor's: F4
+ *
+ */
+
+#include <xmmintrin.h>
+
+#include "ec-gf.h"
+
+static void gf8mul_00000000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm0, %xmm0\n"
+ "\tpxor %xmm1, %xmm1\n"
+ "\tpxor %xmm2, %xmm2\n"
+ "\tpxor %xmm3, %xmm3\n"
+ "\tpxor %xmm4, %xmm4\n"
+ "\tpxor %xmm5, %xmm5\n"
+ "\tpxor %xmm6, %xmm6\n"
+ "\tpxor %xmm7, %xmm7\n"
+ );
+}
+
+static void gf8mul_00000001(void)
+{
+}
+
+static void gf8mul_00000010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_00000011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00000100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_00000101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_00000110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_00000111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00001000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm3, %xmm0\n"
+ );
+}
+
+static void gf8mul_00001001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+static void gf8mul_00001010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_00001011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00001100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00001101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_00001110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_00001111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00010000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm4, %xmm0\n"
+ );
+}
+
+static void gf8mul_00010001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ );
+}
+
+static void gf8mul_00010010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_00010011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00010100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_00010101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_00010110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_00010111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00011000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm3, %xmm0\n"
+ );
+}
+
+static void gf8mul_00011001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+static void gf8mul_00011010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_00011011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00011100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_00011101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_00011110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00011111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00100000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm5, %xmm0\n"
+ );
+}
+
+static void gf8mul_00100001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ );
+}
+
+static void gf8mul_00100010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_00100011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00100100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_00100101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_00100110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_00100111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_00101000(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x28 - confirm
+       with generator. Pairs like "pxor a,b; pxor b,a" are order-sensitive. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm3, %xmm0\n"
+    );
+}
+
+static void gf8mul_00101001(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x29 - confirm
+       with generator. In-place update of xmm0-xmm7; do not reorder. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_00101010(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x2a - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+static void gf8mul_00101011(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x2b - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_00101100(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x2c - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm2, %xmm0\n"
+    );
+}
+
+static void gf8mul_00101101(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x2d - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+    );
+}
+
+static void gf8mul_00101110(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x2e - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+static void gf8mul_00101111(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x2f - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_00110000(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x30 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_00110001(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x31 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+    );
+}
+
+static void gf8mul_00110010(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x32 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+static void gf8mul_00110011(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x33 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_00110100(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x34 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm2, %xmm0\n"
+    );
+}
+
+static void gf8mul_00110101(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x35 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm2\n"
+    );
+}
+
+static void gf8mul_00110110(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x36 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_00110111(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x37 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_00111000(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x38 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm3, %xmm0\n"
+    );
+}
+
+static void gf8mul_00111001(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x39 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+    );
+}
+
+static void gf8mul_00111010(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x3a - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+static void gf8mul_00111011(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x3b - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_00111100(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x3c - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm2, %xmm0\n"
+    );
+}
+
+static void gf8mul_00111101(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x3d - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+    );
+}
+
+static void gf8mul_00111110(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x3e - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm0, %xmm5\n"
+    );
+}
+
+static void gf8mul_00111111(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x3f - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_01000000(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x40 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm6, %xmm0\n"
+    );
+}
+
+static void gf8mul_01000001(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x41 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm0, %xmm6\n"
+    );
+}
+
+static void gf8mul_01000010(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x42 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+static void gf8mul_01000011(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x43 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_01000100(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x44 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm2, %xmm0\n"
+    );
+}
+
+static void gf8mul_01000101(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x45 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm2\n"
+    );
+}
+
+static void gf8mul_01000110(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x46 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+static void gf8mul_01000111(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x47 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_01001000(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x48 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm3, %xmm0\n"
+    );
+}
+
+static void gf8mul_01001001(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x49 - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm3\n"
+    );
+}
+
+static void gf8mul_01001010(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x4a - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+static void gf8mul_01001011(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x4b - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+static void gf8mul_01001100(void)
+{
+    /* Generated XOR network; presumably GF(2^8) multiply by 0x4c - confirm
+       with generator. Order-sensitive in-place register updates. */
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm2, %xmm0\n"
+    );
+}
+
+static void gf8mul_01001101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_01001110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_01001111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_01010000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm4, %xmm0\n"
+ );
+}
+
+static void gf8mul_01010001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ );
+}
+
+static void gf8mul_01010010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_01010011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_01010100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_01010101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_01010110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_01010111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_01011000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm3, %xmm0\n"
+ );
+}
+
+static void gf8mul_01011001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+static void gf8mul_01011010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_01011011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_01011100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_01011101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_01011110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_01011111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_01100000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm5, %xmm0\n"
+ );
+}
+
+static void gf8mul_01100001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ );
+}
+
+static void gf8mul_01100010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_01100011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_01100100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_01100101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_01100110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_01100111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_01101000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm3, %xmm0\n"
+ );
+}
+
+static void gf8mul_01101001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+static void gf8mul_01101010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_01101011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_01101100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_01101101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_01101110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+static void gf8mul_01101111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_01110000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm4, %xmm0\n"
+ );
+}
+
+static void gf8mul_01110001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ );
+}
+
+static void gf8mul_01110010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_01110011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_01110100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_01110101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_01110110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_01110111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_01111000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm3, %xmm0\n"
+ );
+}
+
+static void gf8mul_01111001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+static void gf8mul_01111010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_01111011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_01111100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_01111101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_01111110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_01111111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_10000000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm7, %xmm0\n"
+ );
+}
+
+static void gf8mul_10000001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ );
+}
+
+static void gf8mul_10000010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_10000011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_10000100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_10000101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_10000110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_10000111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_10001000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm3, %xmm0\n"
+ );
+}
+
+static void gf8mul_10001001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+static void gf8mul_10001010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_10001011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_10001100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_10001101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_10001110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/*
+ * Bit-sliced GF(2^8) multiply-by-constant kernels (auto-generated).
+ * Each gf8mul_b7..b0() multiplies packed data by the constant named in
+ * binary, using only XORs between SSE registers xmm0..xmm7 (presumably
+ * one bit-plane per register — verify against the generator/call sites).
+ * NOTE(review): the asm has no input/output/clobber list; it assumes the
+ * caller loads and stores xmm0..xmm7 around the call — confirm all call
+ * sites do this before reusing these helpers elsewhere. Do not reorder
+ * the pxor sequence: later XORs consume results of earlier ones.
+ */
+/* multiply by 0x8F (10001111) */
+static void gf8mul_10001111(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+/* multiply by 0x90 (10010000) */
+static void gf8mul_10010000(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm4, %xmm0\n"
+    );
+}
+
+/* multiply by 0x91 (10010001) */
+static void gf8mul_10010001(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+    );
+}
+
+/* multiply by 0x92 (10010010) */
+static void gf8mul_10010010(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+/* multiply by 0x93 (10010011) */
+static void gf8mul_10010011(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+/*
+ * Auto-generated bit-sliced GF(2^8) multiply kernels (0x94..0x98).
+ * XOR-only networks over xmm0..xmm7; presumably one bit-plane per
+ * register — verify against the generator. NOTE(review): no clobber
+ * list on the asm; callers must own xmm0..xmm7 around the call.
+ * Statement order is significant — do not reorder the pxor lines.
+ */
+/* multiply by 0x94 (10010100) */
+static void gf8mul_10010100(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm2, %xmm0\n"
+    );
+}
+
+/* multiply by 0x95 (10010101) */
+static void gf8mul_10010101(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+/* multiply by 0x96 (10010110) */
+static void gf8mul_10010110(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+/* multiply by 0x97 (10010111) */
+static void gf8mul_10010111(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+/* multiply by 0x98 (10011000) */
+static void gf8mul_10011000(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm3, %xmm0\n"
+    );
+}
+
+/*
+ * Auto-generated bit-sliced GF(2^8) multiply kernels (0x99..0x9E).
+ * XOR-only networks over xmm0..xmm7; presumably one bit-plane per
+ * register — verify against the generator. NOTE(review): no clobber
+ * list on the asm; callers must own xmm0..xmm7 around the call.
+ * Statement order is significant — do not reorder the pxor lines.
+ */
+/* multiply by 0x99 (10011001) */
+static void gf8mul_10011001(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+    );
+}
+
+/* multiply by 0x9A (10011010) */
+static void gf8mul_10011010(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+/* multiply by 0x9B (10011011) */
+static void gf8mul_10011011(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+/* multiply by 0x9C (10011100) */
+static void gf8mul_10011100(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm2, %xmm0\n"
+    );
+}
+
+/* multiply by 0x9D (10011101) */
+static void gf8mul_10011101(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+    );
+}
+
+/* multiply by 0x9E (10011110) */
+static void gf8mul_10011110(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+/*
+ * Auto-generated bit-sliced GF(2^8) multiply kernels (0x9F..0xA6).
+ * XOR-only networks over xmm0..xmm7; presumably one bit-plane per
+ * register — verify against the generator. NOTE(review): no clobber
+ * list on the asm; callers must own xmm0..xmm7 around the call.
+ * Statement order is significant — do not reorder the pxor lines.
+ */
+/* multiply by 0x9F (10011111) */
+static void gf8mul_10011111(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+/* multiply by 0xA0 (10100000) */
+static void gf8mul_10100000(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm5, %xmm0\n"
+    );
+}
+
+/* multiply by 0xA1 (10100001) */
+static void gf8mul_10100001(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+    );
+}
+
+/* multiply by 0xA2 (10100010) */
+static void gf8mul_10100010(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+/* multiply by 0xA3 (10100011) */
+static void gf8mul_10100011(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+/* multiply by 0xA4 (10100100) */
+static void gf8mul_10100100(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+/* multiply by 0xA5 (10100101) */
+static void gf8mul_10100101(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm2\n"
+    );
+}
+
+/* multiply by 0xA6 (10100110) */
+static void gf8mul_10100110(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+/*
+ * Auto-generated bit-sliced GF(2^8) multiply kernels (0xA7..0xAE).
+ * XOR-only networks over xmm0..xmm7; presumably one bit-plane per
+ * register — verify against the generator. NOTE(review): no clobber
+ * list on the asm; callers must own xmm0..xmm7 around the call.
+ * Statement order is significant — do not reorder the pxor lines.
+ */
+/* multiply by 0xA7 (10100111) */
+static void gf8mul_10100111(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm4\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+/* multiply by 0xA8 (10101000) */
+static void gf8mul_10101000(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm3, %xmm0\n"
+    );
+}
+
+/* multiply by 0xA9 (10101001) */
+static void gf8mul_10101001(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm0, %xmm6\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm4\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+/* multiply by 0xAA (10101010) */
+static void gf8mul_10101010(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+/* multiply by 0xAB (10101011) */
+static void gf8mul_10101011(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm3\n"
+        "\tpxor %xmm6, %xmm1\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm2\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm4\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm1\n"
+    );
+}
+
+/* multiply by 0xAC (10101100) */
+static void gf8mul_10101100(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm5\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm2\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm4, %xmm6\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm1\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm5\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm3, %xmm0\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm2, %xmm7\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm2, %xmm0\n"
+    );
+}
+
+/* multiply by 0xAD (10101101) */
+static void gf8mul_10101101(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm1\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm4\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm6\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm3\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm4, %xmm0\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm3, %xmm7\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm2\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm6\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm1\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+    );
+}
+
+/* multiply by 0xAE (10101110) */
+static void gf8mul_10101110(void)
+{
+    __asm__ __volatile__
+    (
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm7, %xmm4\n"
+        "\tpxor %xmm7, %xmm3\n"
+        "\tpxor %xmm7, %xmm0\n"
+        "\tpxor %xmm6, %xmm7\n"
+        "\tpxor %xmm6, %xmm5\n"
+        "\tpxor %xmm6, %xmm2\n"
+        "\tpxor %xmm6, %xmm0\n"
+        "\tpxor %xmm7, %xmm6\n"
+        "\tpxor %xmm5, %xmm7\n"
+        "\tpxor %xmm5, %xmm4\n"
+        "\tpxor %xmm5, %xmm3\n"
+        "\tpxor %xmm5, %xmm1\n"
+        "\tpxor %xmm5, %xmm0\n"
+        "\tpxor %xmm4, %xmm7\n"
+        "\tpxor %xmm4, %xmm5\n"
+        "\tpxor %xmm4, %xmm2\n"
+        "\tpxor %xmm3, %xmm6\n"
+        "\tpxor %xmm3, %xmm4\n"
+        "\tpxor %xmm3, %xmm1\n"
+        "\tpxor %xmm2, %xmm5\n"
+        "\tpxor %xmm2, %xmm3\n"
+        "\tpxor %xmm2, %xmm0\n"
+        "\tpxor %xmm1, %xmm7\n"
+        "\tpxor %xmm1, %xmm6\n"
+        "\tpxor %xmm1, %xmm5\n"
+        "\tpxor %xmm1, %xmm3\n"
+        "\tpxor %xmm1, %xmm2\n"
+        "\tpxor %xmm1, %xmm0\n"
+        "\tpxor %xmm0, %xmm7\n"
+        "\tpxor %xmm0, %xmm5\n"
+        "\tpxor %xmm0, %xmm3\n"
+        "\tpxor %xmm0, %xmm2\n"
+        "\tpxor %xmm0, %xmm1\n"
+        "\tpxor %xmm1, %xmm0\n"
+    );
+}
+
+static void gf8mul_10101111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_10110000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm4, %xmm0\n"
+ );
+}
+
+static void gf8mul_10110001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ );
+}
+
+static void gf8mul_10110010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xb3 (0b10110011); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10110011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xb4 (0b10110100); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10110100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xb5 (0b10110101); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10110101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xb6 (0b10110110); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10110110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xb7 (0b10110111); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10110111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xb8 (0b10111000); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10111000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm3, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xb9 (0b10111001); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10111001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xba (0b10111010); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10111010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xbb (0b10111011); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10111011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xbc (0b10111100); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10111100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xbd (0b10111101); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10111101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xbe (0b10111110); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10111110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xbf (0b10111111); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_10111111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xc0 (0b11000000); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11000000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm6, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xc1 (0b11000001); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11000001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xc2 (0b11000010); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11000010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xc3 (0b11000011); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11000011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xc4 (0b11000100); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11000100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xc5 (0b11000101); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11000101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xc6 (0b11000110); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11000110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xc7 (0b11000111); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11000111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xc8 (0b11001000); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11001000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm3, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xc9 (0b11001001); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11001001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xca (0b11001010); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11001010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xcb (0b11001011); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11001011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xcc (0b11001100); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11001100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xcd (0b11001101); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11001101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xce (0b11001110); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11001110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xcf (0b11001111); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11001111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xd0 (0b11010000); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11010000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm4, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xd1 (0b11010001); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11010001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xd2 (0b11010010); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11010010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xd3 (0b11010011); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11010011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xd4 (0b11010100); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11010100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xd5 (0b11010101); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11010101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xd6 (0b11010110); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11010110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+/* Bit-sliced GF(2^8) multiply of xmm0..xmm7 by constant 0xd7 (0b11010111); generated XOR schedule — order matters, do not reorder. */
+static void gf8mul_11010111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xd8 (0b11011000) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11011000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm3, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xd9 (0b11011001) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11011001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xda (0b11011010) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11011010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xdb (0b11011011) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11011011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xdc (0b11011100) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11011100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xdd (0b11011101) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11011101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xde (0b11011110) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11011110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xdf (0b11011111) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11011111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xe0 (0b11100000) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11100000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm5, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xe1 (0b11100001) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11100001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xe2 (0b11100010) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11100010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xe3 (0b11100011) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11100011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xe4 (0b11100100) — NOTE(review): constant inferred from the function
+ * name. The trailing xor/xor pairs (e.g. xmm5^=xmm7 then xmm7^=xmm5)
+ * implement register swaps without a spare register; generated code,
+ * XOR order is significant, do not edit by hand. */
+static void gf8mul_11100100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xe5 (0b11100101) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11100101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xe6 (0b11100110) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11100110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xe7 (0b11100111) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11100111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xe8 (0b11101000) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11101000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xe9 (0b11101001) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11101001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xea (0b11101010) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11101010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xeb (0b11101011) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11101011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xec (0b11101100) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11101100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xed (0b11101101) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11101101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xee (0b11101110) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11101110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xef (0b11101111) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11101111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xf0 (0b11110000) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11110000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm4, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xf1 (0b11110001) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11110001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xf2 (0b11110010) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11110010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xf3 (0b11110011) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11110011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xf4 (0b11110100) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11110100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xf5 (0b11110101) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11110101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xf6 (0b11110110) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11110110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xf7 (0b11110111) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11110111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xf8 (0b11111000) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11111000(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm3, %xmm0\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xf9 (0b11111001) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11111001(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ );
+}
+
+/* XOR network multiplying the bit-sliced GF(2^8) operand in xmm0..xmm7 by
+ * 0xfa (0b11111010) — NOTE(review): constant inferred from the function
+ * name; generated code, XOR order is significant, do not edit by hand. */
+static void gf8mul_11111010(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_11111011(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm5, %xmm2\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+static void gf8mul_11111100(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm3, %xmm7\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm3, %xmm0\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm6\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm2, %xmm0\n"
+ );
+}
+
+static void gf8mul_11111101(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm2\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm4\n"
+ "\tpxor %xmm5, %xmm0\n"
+ "\tpxor %xmm6, %xmm5\n"
+ "\tpxor %xmm4, %xmm7\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm3\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm5\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ );
+}
+
+static void gf8mul_11111110(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm6\n"
+ "\tpxor %xmm7, %xmm5\n"
+ "\tpxor %xmm7, %xmm4\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm5, %xmm1\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm1\n"
+ "\tpxor %xmm4, %xmm0\n"
+ "\tpxor %xmm6, %xmm4\n"
+ "\tpxor %xmm3, %xmm6\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm2\n"
+ "\tpxor %xmm2, %xmm7\n"
+ "\tpxor %xmm2, %xmm6\n"
+ "\tpxor %xmm2, %xmm1\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm1, %xmm4\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm1, %xmm0\n"
+ "\tpxor %xmm0, %xmm7\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm5\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm3\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ "\tpxor %xmm1, %xmm0\n"
+ );
+}
+
+static void gf8mul_11111111(void)
+{
+ __asm__ __volatile__
+ (
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm6, %xmm3\n"
+ "\tpxor %xmm6, %xmm2\n"
+ "\tpxor %xmm6, %xmm1\n"
+ "\tpxor %xmm6, %xmm0\n"
+ "\tpxor %xmm5, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm5, %xmm3\n"
+ "\tpxor %xmm4, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm4, %xmm2\n"
+ "\tpxor %xmm3, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm3, %xmm1\n"
+ "\tpxor %xmm2, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm2, %xmm0\n"
+ "\tpxor %xmm1, %xmm7\n"
+ "\tpxor %xmm1, %xmm5\n"
+ "\tpxor %xmm1, %xmm3\n"
+ "\tpxor %xmm0, %xmm6\n"
+ "\tpxor %xmm0, %xmm4\n"
+ "\tpxor %xmm0, %xmm2\n"
+ "\tpxor %xmm7, %xmm3\n"
+ "\tpxor %xmm7, %xmm1\n"
+ "\tpxor %xmm7, %xmm0\n"
+ "\tpxor %xmm6, %xmm7\n"
+ "\tpxor %xmm5, %xmm6\n"
+ "\tpxor %xmm4, %xmm5\n"
+ "\tpxor %xmm3, %xmm4\n"
+ "\tpxor %xmm2, %xmm3\n"
+ "\tpxor %xmm1, %xmm2\n"
+ "\tpxor %xmm0, %xmm1\n"
+ );
+}
+
+void (* ec_gf_mul_table[256])(void) =
+{
+ gf8mul_00000000,
+ gf8mul_00000001,
+ gf8mul_00000010,
+ gf8mul_00000011,
+ gf8mul_00000100,
+ gf8mul_00000101,
+ gf8mul_00000110,
+ gf8mul_00000111,
+ gf8mul_00001000,
+ gf8mul_00001001,
+ gf8mul_00001010,
+ gf8mul_00001011,
+ gf8mul_00001100,
+ gf8mul_00001101,
+ gf8mul_00001110,
+ gf8mul_00001111,
+ gf8mul_00010000,
+ gf8mul_00010001,
+ gf8mul_00010010,
+ gf8mul_00010011,
+ gf8mul_00010100,
+ gf8mul_00010101,
+ gf8mul_00010110,
+ gf8mul_00010111,
+ gf8mul_00011000,
+ gf8mul_00011001,
+ gf8mul_00011010,
+ gf8mul_00011011,
+ gf8mul_00011100,
+ gf8mul_00011101,
+ gf8mul_00011110,
+ gf8mul_00011111,
+ gf8mul_00100000,
+ gf8mul_00100001,
+ gf8mul_00100010,
+ gf8mul_00100011,
+ gf8mul_00100100,
+ gf8mul_00100101,
+ gf8mul_00100110,
+ gf8mul_00100111,
+ gf8mul_00101000,
+ gf8mul_00101001,
+ gf8mul_00101010,
+ gf8mul_00101011,
+ gf8mul_00101100,
+ gf8mul_00101101,
+ gf8mul_00101110,
+ gf8mul_00101111,
+ gf8mul_00110000,
+ gf8mul_00110001,
+ gf8mul_00110010,
+ gf8mul_00110011,
+ gf8mul_00110100,
+ gf8mul_00110101,
+ gf8mul_00110110,
+ gf8mul_00110111,
+ gf8mul_00111000,
+ gf8mul_00111001,
+ gf8mul_00111010,
+ gf8mul_00111011,
+ gf8mul_00111100,
+ gf8mul_00111101,
+ gf8mul_00111110,
+ gf8mul_00111111,
+ gf8mul_01000000,
+ gf8mul_01000001,
+ gf8mul_01000010,
+ gf8mul_01000011,
+ gf8mul_01000100,
+ gf8mul_01000101,
+ gf8mul_01000110,
+ gf8mul_01000111,
+ gf8mul_01001000,
+ gf8mul_01001001,
+ gf8mul_01001010,
+ gf8mul_01001011,
+ gf8mul_01001100,
+ gf8mul_01001101,
+ gf8mul_01001110,
+ gf8mul_01001111,
+ gf8mul_01010000,
+ gf8mul_01010001,
+ gf8mul_01010010,
+ gf8mul_01010011,
+ gf8mul_01010100,
+ gf8mul_01010101,
+ gf8mul_01010110,
+ gf8mul_01010111,
+ gf8mul_01011000,
+ gf8mul_01011001,
+ gf8mul_01011010,
+ gf8mul_01011011,
+ gf8mul_01011100,
+ gf8mul_01011101,
+ gf8mul_01011110,
+ gf8mul_01011111,
+ gf8mul_01100000,
+ gf8mul_01100001,
+ gf8mul_01100010,
+ gf8mul_01100011,
+ gf8mul_01100100,
+ gf8mul_01100101,
+ gf8mul_01100110,
+ gf8mul_01100111,
+ gf8mul_01101000,
+ gf8mul_01101001,
+ gf8mul_01101010,
+ gf8mul_01101011,
+ gf8mul_01101100,
+ gf8mul_01101101,
+ gf8mul_01101110,
+ gf8mul_01101111,
+ gf8mul_01110000,
+ gf8mul_01110001,
+ gf8mul_01110010,
+ gf8mul_01110011,
+ gf8mul_01110100,
+ gf8mul_01110101,
+ gf8mul_01110110,
+ gf8mul_01110111,
+ gf8mul_01111000,
+ gf8mul_01111001,
+ gf8mul_01111010,
+ gf8mul_01111011,
+ gf8mul_01111100,
+ gf8mul_01111101,
+ gf8mul_01111110,
+ gf8mul_01111111,
+ gf8mul_10000000,
+ gf8mul_10000001,
+ gf8mul_10000010,
+ gf8mul_10000011,
+ gf8mul_10000100,
+ gf8mul_10000101,
+ gf8mul_10000110,
+ gf8mul_10000111,
+ gf8mul_10001000,
+ gf8mul_10001001,
+ gf8mul_10001010,
+ gf8mul_10001011,
+ gf8mul_10001100,
+ gf8mul_10001101,
+ gf8mul_10001110,
+ gf8mul_10001111,
+ gf8mul_10010000,
+ gf8mul_10010001,
+ gf8mul_10010010,
+ gf8mul_10010011,
+ gf8mul_10010100,
+ gf8mul_10010101,
+ gf8mul_10010110,
+ gf8mul_10010111,
+ gf8mul_10011000,
+ gf8mul_10011001,
+ gf8mul_10011010,
+ gf8mul_10011011,
+ gf8mul_10011100,
+ gf8mul_10011101,
+ gf8mul_10011110,
+ gf8mul_10011111,
+ gf8mul_10100000,
+ gf8mul_10100001,
+ gf8mul_10100010,
+ gf8mul_10100011,
+ gf8mul_10100100,
+ gf8mul_10100101,
+ gf8mul_10100110,
+ gf8mul_10100111,
+ gf8mul_10101000,
+ gf8mul_10101001,
+ gf8mul_10101010,
+ gf8mul_10101011,
+ gf8mul_10101100,
+ gf8mul_10101101,
+ gf8mul_10101110,
+ gf8mul_10101111,
+ gf8mul_10110000,
+ gf8mul_10110001,
+ gf8mul_10110010,
+ gf8mul_10110011,
+ gf8mul_10110100,
+ gf8mul_10110101,
+ gf8mul_10110110,
+ gf8mul_10110111,
+ gf8mul_10111000,
+ gf8mul_10111001,
+ gf8mul_10111010,
+ gf8mul_10111011,
+ gf8mul_10111100,
+ gf8mul_10111101,
+ gf8mul_10111110,
+ gf8mul_10111111,
+ gf8mul_11000000,
+ gf8mul_11000001,
+ gf8mul_11000010,
+ gf8mul_11000011,
+ gf8mul_11000100,
+ gf8mul_11000101,
+ gf8mul_11000110,
+ gf8mul_11000111,
+ gf8mul_11001000,
+ gf8mul_11001001,
+ gf8mul_11001010,
+ gf8mul_11001011,
+ gf8mul_11001100,
+ gf8mul_11001101,
+ gf8mul_11001110,
+ gf8mul_11001111,
+ gf8mul_11010000,
+ gf8mul_11010001,
+ gf8mul_11010010,
+ gf8mul_11010011,
+ gf8mul_11010100,
+ gf8mul_11010101,
+ gf8mul_11010110,
+ gf8mul_11010111,
+ gf8mul_11011000,
+ gf8mul_11011001,
+ gf8mul_11011010,
+ gf8mul_11011011,
+ gf8mul_11011100,
+ gf8mul_11011101,
+ gf8mul_11011110,
+ gf8mul_11011111,
+ gf8mul_11100000,
+ gf8mul_11100001,
+ gf8mul_11100010,
+ gf8mul_11100011,
+ gf8mul_11100100,
+ gf8mul_11100101,
+ gf8mul_11100110,
+ gf8mul_11100111,
+ gf8mul_11101000,
+ gf8mul_11101001,
+ gf8mul_11101010,
+ gf8mul_11101011,
+ gf8mul_11101100,
+ gf8mul_11101101,
+ gf8mul_11101110,
+ gf8mul_11101111,
+ gf8mul_11110000,
+ gf8mul_11110001,
+ gf8mul_11110010,
+ gf8mul_11110011,
+ gf8mul_11110100,
+ gf8mul_11110101,
+ gf8mul_11110110,
+ gf8mul_11110111,
+ gf8mul_11111000,
+ gf8mul_11111001,
+ gf8mul_11111010,
+ gf8mul_11111011,
+ gf8mul_11111100,
+ gf8mul_11111101,
+ gf8mul_11111110,
+ gf8mul_11111111
+};
diff --git a/xlators/cluster/ec/src/ec-gf.h b/xlators/cluster/ec/src/ec-gf.h
new file mode 100644
index 00000000000..664feb46ce5
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-gf.h
@@ -0,0 +1,114 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+/*
+ * File automatically generated on Thu Jan 26 12:08:19 2012
+ *
+ * DO NOT MODIFY
+ *
+ * Multiplications in a GF(2^8) with modulus 0x11D using XOR's
+ *
+ */
+
+#ifndef __EC_GF_H__
+#define __EC_GF_H__
+
+#define EC_GF_BITS 8
+#define EC_GF_MOD 0x11D
+
+#define ec_gf_load(addr) \
+ do \
+ { \
+ __asm__ __volatile__ \
+ ( \
+ "\tmovdqa 0*16(%0), %%xmm0\n" \
+ "\tmovdqa 1*16(%0), %%xmm1\n" \
+ "\tmovdqa 2*16(%0), %%xmm2\n" \
+ "\tmovdqa 3*16(%0), %%xmm3\n" \
+ "\tmovdqa 4*16(%0), %%xmm4\n" \
+ "\tmovdqa 5*16(%0), %%xmm5\n" \
+ "\tmovdqa 6*16(%0), %%xmm6\n" \
+ "\tmovdqa 7*16(%0), %%xmm7\n" \
+ : \
+ : "r" (addr) \
+ : "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" \
+ ); \
+ } while (0)
+
+#define ec_gf_store(addr) \
+ do \
+ { \
+ __asm__ __volatile__ \
+ ( \
+ "\tmovdqa %%xmm0, 0*16(%0)\n" \
+ "\tmovdqa %%xmm1, 1*16(%0)\n" \
+ "\tmovdqa %%xmm2, 2*16(%0)\n" \
+ "\tmovdqa %%xmm3, 3*16(%0)\n" \
+ "\tmovdqa %%xmm4, 4*16(%0)\n" \
+ "\tmovdqa %%xmm5, 5*16(%0)\n" \
+ "\tmovdqa %%xmm6, 6*16(%0)\n" \
+ "\tmovdqa %%xmm7, 7*16(%0)\n" \
+ : \
+ : "r" (addr) \
+ : "memory" \
+ ); \
+ } while (0)
+
+#define ec_gf_clear() \
+ do \
+ { \
+ __asm__ __volatile__ \
+ ( \
+ "\tpxor %xmm0, %xmm0\n" \
+ "\tpxor %xmm1, %xmm1\n" \
+ "\tpxor %xmm2, %xmm2\n" \
+ "\tpxor %xmm3, %xmm3\n" \
+ "\tpxor %xmm4, %xmm4\n" \
+ "\tpxor %xmm5, %xmm5\n" \
+ "\tpxor %xmm6, %xmm6\n" \
+ "\tpxor %xmm7, %xmm7\n" \
+ : \
+ : \
+ : "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" \
+ ); \
+ } while (0)
+
+#define ec_gf_xor(addr) \
+ do \
+ { \
+ __asm__ __volatile__ \
+ ( \
+ "\tpxor 0*16(%0), %%xmm0\n" \
+ "\tpxor 1*16(%0), %%xmm1\n" \
+ "\tpxor 2*16(%0), %%xmm2\n" \
+ "\tpxor 3*16(%0), %%xmm3\n" \
+ "\tpxor 4*16(%0), %%xmm4\n" \
+ "\tpxor 5*16(%0), %%xmm5\n" \
+ "\tpxor 6*16(%0), %%xmm6\n" \
+ "\tpxor 7*16(%0), %%xmm7\n" \
+ : \
+ : "r" (addr) \
+ : "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" \
+ ); \
+ } while (0)
+
+extern void (* ec_gf_mul_table[])(void);
+
+#endif /* __EC_GF_H__ */
diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
new file mode 100644
index 00000000000..37264f598b9
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-heal.c
@@ -0,0 +1,1470 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include "xlator.h"
+#include "defaults.h"
+
+#include "ec-helpers.h"
+#include "ec-common.h"
+#include "ec-combine.h"
+#include "ec-method.h"
+#include "ec-fops.h"
+
+#include "ec-mem-types.h"
+#include "ec-data.h"
+
+/* FOP: heal */
+
+void ec_heal_exclude(ec_heal_t * heal, uintptr_t mask)
+{
+ LOCK(&heal->lock);
+
+ heal->bad &= ~mask;
+
+ UNLOCK(&heal->lock);
+}
+
+void ec_heal_lookup_resume(ec_fop_data_t * fop)
+{
+ ec_heal_t * heal = fop->data;
+ ec_cbk_data_t * cbk;
+ uintptr_t good = 0, bad = 0;
+
+ if (heal->lookup != NULL)
+ {
+ ec_fop_data_release(heal->lookup);
+ }
+ ec_fop_data_acquire(fop);
+
+ list_for_each_entry(cbk, &fop->cbk_list, list)
+ {
+ if ((cbk->op_ret < 0) && (cbk->op_errno == ENOTCONN))
+ {
+ continue;
+ }
+
+ if (cbk == fop->answer)
+ {
+ if (cbk->op_ret >= 0)
+ {
+ heal->iatt = cbk->iatt[0];
+ heal->version = cbk->version;
+ heal->raw_size = cbk->size;
+ heal->fop->pre_size = cbk->iatt[0].ia_size;
+ heal->fop->post_size = cbk->iatt[0].ia_size;
+
+ if (!ec_loc_prepare(heal->xl, &heal->loc, cbk->inode,
+ &cbk->iatt[0]))
+ {
+ fop->answer = NULL;
+ fop->error = EIO;
+
+ bad |= cbk->mask;
+
+ continue;
+ }
+ }
+
+ good |= cbk->mask;
+ }
+ else
+ {
+ bad |= cbk->mask;
+ }
+ }
+
+ heal->good = good;
+ heal->bad = bad;
+
+ heal->lookup = fop;
+
+ ec_resume_parent(fop, fop->answer != NULL ? 0 : fop->error);
+}
+
+int32_t ec_heal_entry_lookup_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret,
+ int32_t op_errno, inode_t * inode,
+ struct iatt * buf, dict_t * xdata,
+ struct iatt * postparent)
+{
+ ec_heal_lookup_resume(cookie);
+
+ return 0;
+}
+
+int32_t ec_heal_inode_lookup_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret,
+ int32_t op_errno, inode_t * inode,
+ struct iatt * buf, dict_t * xdata,
+ struct iatt * postparent)
+{
+ ec_heal_lookup_resume(cookie);
+
+ return 0;
+}
+
+uintptr_t ec_heal_check(ec_fop_data_t * fop, uintptr_t * pgood)
+{
+ ec_cbk_data_t * cbk;
+ uintptr_t mask[2] = { 0, 0 };
+
+ list_for_each_entry(cbk, &fop->cbk_list, list)
+ {
+ mask[cbk->op_ret >= 0] |= cbk->mask;
+ }
+
+ if (pgood != NULL)
+ {
+ *pgood = mask[1];
+ }
+
+ return mask[0];
+}
+
+void ec_heal_update(ec_fop_data_t * fop, int32_t is_open)
+{
+ ec_heal_t * heal = fop->data;
+ uintptr_t good, bad;
+
+ bad = ec_heal_check(fop, &good);
+
+ LOCK(&heal->lock);
+
+ heal->bad &= ~bad;
+ if (is_open)
+ {
+ heal->open |= good;
+ }
+
+ UNLOCK(&heal->lock);
+
+ fop->error = 0;
+}
+
+void ec_heal_avoid(ec_fop_data_t * fop)
+{
+ ec_heal_t * heal = fop->data;
+ uintptr_t bad;
+
+ bad = ec_heal_check(fop, NULL);
+
+ LOCK(&heal->lock);
+
+ heal->good &= ~bad;
+
+ UNLOCK(&heal->lock);
+}
+
+int32_t ec_heal_mkdir_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
+ int32_t op_ret, int32_t op_errno, inode_t * inode,
+ struct iatt * buf, struct iatt * preparent,
+ struct iatt * postparent, dict_t * xdata)
+{
+ ec_heal_update(cookie, 0);
+
+ return 0;
+}
+
+int32_t ec_heal_mknod_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
+ int32_t op_ret, int32_t op_errno, inode_t * inode,
+ struct iatt * buf, struct iatt * preparent,
+ struct iatt * postparent, dict_t * xdata)
+{
+ ec_heal_update(cookie, 0);
+
+ return 0;
+}
+
+int32_t ec_heal_symlink_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret, int32_t op_errno,
+ inode_t * inode, struct iatt * buf,
+ struct iatt * preparent, struct iatt * postparent,
+ dict_t * xdata)
+{
+ ec_heal_update(cookie, 0);
+
+ return 0;
+}
+
+int32_t ec_heal_create_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret, int32_t op_errno,
+ fd_t * fd, inode_t * inode, struct iatt * buf,
+ struct iatt * preparent, struct iatt * postparent,
+ dict_t * xdata)
+{
+ ec_heal_update(cookie, 1);
+
+ return 0;
+}
+
+int32_t ec_heal_setattr_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret, int32_t op_errno,
+ struct iatt * preop_stbuf,
+ struct iatt * postop_stbuf,
+ dict_t * xdata)
+{
+ ec_heal_update(cookie, 0);
+
+ return 0;
+}
+
+int32_t ec_heal_setxattr_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret, int32_t op_errno,
+ dict_t * xdata)
+{
+ ec_heal_update(cookie, 0);
+
+ return 0;
+}
+
+int32_t ec_heal_removexattr_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret,
+ int32_t op_errno, dict_t * xdata)
+{
+ ec_heal_update(cookie, 0);
+
+ return 0;
+}
+
+int32_t ec_heal_link_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
+ int32_t op_ret, int32_t op_errno, inode_t * inode,
+ struct iatt * buf, struct iatt * preparent,
+ struct iatt * postparent, dict_t * xdata)
+{
+ ec_fop_data_t * fop = cookie;
+ ec_heal_t * heal = fop->data;
+ uintptr_t good, bad;
+
+ bad = ec_heal_check(fop, &good);
+ ec_heal_exclude(heal, good);
+
+ if (bad != 0)
+ {
+ fop->error = 0;
+
+ xdata = fop->xdata;
+ fop = fop->parent;
+
+ ec_create(fop->frame, fop->xl, bad, EC_MINIMUM_ONE,
+ ec_heal_create_cbk, heal, &heal->loc, 0,
+ st_mode_from_ia(heal->iatt.ia_prot, IA_INVAL),
+ 0, heal->fd, xdata);
+ }
+
+ return 0;
+}
+
+int32_t ec_heal_target_open_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret,
+ int32_t op_errno, fd_t * fd, dict_t * xdata)
+{
+ ec_heal_update(cookie, 1);
+
+ return 0;
+}
+
+int32_t ec_heal_source_open_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret,
+ int32_t op_errno, fd_t * fd, dict_t * xdata)
+{
+ ec_heal_avoid(cookie);
+
+ return 0;
+}
+
+int32_t ec_heal_reopen_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret, int32_t op_errno,
+ fd_t * fd, dict_t * xdata)
+{
+ ec_fop_data_t * fop = cookie;
+ ec_fd_t * ctx;
+ uintptr_t good;
+
+ ec_heal_check(fop, &good);
+
+ if (good != 0)
+ {
+ LOCK(&fd->lock);
+
+ ctx = __ec_fd_get(fd, fop->xl);
+ if ((ctx != NULL) && (ctx->loc.inode != NULL))
+ {
+ ctx->bad &= ~good;
+ ctx->open |= good;
+ }
+
+ UNLOCK(&fd->lock);
+ }
+
+ return 0;
+}
+
+int32_t ec_heal_create(ec_heal_t * heal, uintptr_t mask, int32_t try_link)
+{
+ loc_t loc;
+ dict_t * xdata;
+
+ xdata = dict_new();
+ if (xdata == NULL)
+ {
+ return ENOMEM;
+ }
+
+ if (dict_set_static_bin(xdata, "gfid-req", heal->iatt.ia_gfid,
+ sizeof(uuid_t)) != 0)
+ {
+ dict_unref(xdata);
+
+ return ENOMEM;
+ }
+
+ if ((heal->iatt.ia_type == IA_IFREG) && try_link)
+ {
+ memset(&loc, 0, sizeof(loc));
+ loc.inode = heal->loc.inode;
+ uuid_copy(loc.gfid, heal->iatt.ia_gfid);
+
+ ec_link(heal->fop->frame, heal->xl, mask, EC_MINIMUM_ONE,
+ ec_heal_link_cbk, heal, &loc, &heal->loc, xdata);
+
+ dict_unref(xdata);
+
+ return 0;
+ }
+
+ switch (heal->iatt.ia_type)
+ {
+ case IA_IFDIR:
+ ec_mkdir(heal->fop->frame, heal->xl, mask, EC_MINIMUM_ONE,
+ ec_heal_mkdir_cbk, heal, &heal->loc,
+ st_mode_from_ia(heal->iatt.ia_prot, IA_INVAL),
+ 0, xdata);
+
+ break;
+
+ case IA_IFLNK:
+ ec_symlink(heal->fop->frame, heal->xl, mask, EC_MINIMUM_ONE,
+ ec_heal_symlink_cbk, heal, heal->symlink, &heal->loc,
+ 0, xdata);
+
+ break;
+
+ case IA_IFREG:
+ ec_create(heal->fop->frame, heal->xl, mask, EC_MINIMUM_ONE,
+ ec_heal_create_cbk, heal, &heal->loc, 0,
+ st_mode_from_ia(heal->iatt.ia_prot, IA_INVAL),
+ 0, heal->fd, xdata);
+
+ break;
+
+ default:
+ ec_mknod(heal->fop->frame, heal->xl, mask, EC_MINIMUM_ONE,
+ ec_heal_mknod_cbk, heal, &heal->loc,
+ st_mode_from_ia(heal->iatt.ia_prot, IA_INVAL),
+ heal->iatt.ia_rdev, 0, xdata);
+
+ break;
+ }
+
+ dict_unref(xdata);
+
+ return 0;
+}
+
+void ec_heal_recreate(ec_fop_data_t * fop)
+{
+ ec_cbk_data_t * cbk;
+ ec_heal_t * heal = fop->data;
+ uintptr_t mask = 0;
+
+ if (heal->iatt.ia_type == IA_INVAL)
+ {
+ return;
+ }
+
+ list_for_each_entry(cbk, &fop->cbk_list, list)
+ {
+ if ((cbk->op_ret >= 0) || (cbk->op_errno == ENOENT) ||
+ (cbk->op_errno == ENOTDIR))
+ {
+ mask |= cbk->mask;
+ }
+ }
+
+ if (mask != 0)
+ {
+ ec_heal_create(heal, mask, 0);
+ }
+}
+
+int32_t ec_heal_rmdir_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt * preparent, struct iatt * postparent,
+ dict_t * xdata)
+{
+ ec_heal_update(cookie, 0);
+ ec_heal_recreate(cookie);
+
+ return 0;
+}
+
+int32_t ec_heal_unlink_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret, int32_t op_errno,
+ struct iatt * preparent, struct iatt * postparent,
+ dict_t * xdata)
+{
+ ec_heal_update(cookie, 0);
+ ec_heal_recreate(cookie);
+
+ return 0;
+}
+
+int32_t ec_heal_init(ec_fop_data_t * fop)
+{
+ ec_t * ec = fop->xl->private;
+ struct iobuf_pool * pool;
+ inode_t * inode;
+ ec_inode_t * ctx;
+ ec_heal_t * heal = NULL;
+ int32_t error = 0;
+
+ inode = fop->loc[0].inode;
+ if (inode == NULL)
+ {
+ gf_log(fop->xl->name, GF_LOG_WARNING, "Unable to start inode healing "
+ "because there is not enough "
+ "information");
+
+ return ENODATA;
+ }
+
+ LOCK(&inode->lock);
+
+ ctx = __ec_inode_get(inode, fop->xl);
+ if (ctx == NULL)
+ {
+ error = EIO;
+
+ goto out;
+ }
+
+ if (ctx->heal != NULL)
+ {
+ error = EEXIST;
+
+ goto out;
+ }
+
+ heal = GF_MALLOC(sizeof(ec_heal_t), ec_mt_ec_heal_t);
+ if (heal == NULL)
+ {
+ error = ENOMEM;
+
+ goto out;
+ }
+
+ memset(heal, 0, sizeof(ec_heal_t));
+
+ if (loc_copy(&heal->loc, &fop->loc[0]) != 0)
+ {
+ error = ENOMEM;
+
+ goto out;
+ }
+ if (uuid_is_null(heal->loc.gfid))
+ {
+ uuid_copy(heal->loc.gfid, heal->loc.inode->gfid);
+ }
+
+ LOCK_INIT(&heal->lock);
+
+ heal->xl = fop->xl;
+ heal->fop = fop;
+ pool = fop->xl->ctx->iobuf_pool;
+ heal->size = iobpool_default_pagesize(pool) * ec->fragments;
+
+ fop->data = heal;
+
+ ctx->heal = heal;
+ heal = NULL;
+
+out:
+ UNLOCK(&inode->lock);
+
+ GF_FREE(heal);
+
+ return error;
+}
+
+void ec_heal_entrylk(ec_heal_t * heal, entrylk_cmd cmd)
+{
+ loc_t loc;
+ char * name;
+ int32_t error;
+
+ error = ec_loc_parent(heal->xl, &heal->loc, &loc, &name);
+ if (error != 0)
+ {
+ ec_fop_set_error(heal->fop, error);
+
+ return;
+ }
+
+ ec_entrylk(heal->fop->frame, heal->xl, -1, EC_MINIMUM_ALL, NULL, NULL,
+ heal->xl->name, &loc, name, cmd, ENTRYLK_WRLCK, NULL);
+
+ loc_wipe(&loc);
+ GF_FREE(name);
+}
+
+void ec_heal_inodelk(ec_heal_t * heal, int32_t type, int32_t use_fd,
+ off_t offset, size_t size)
+{
+ struct gf_flock flock;
+
+ flock.l_type = type;
+ flock.l_whence = SEEK_SET;
+ flock.l_start = offset;
+ flock.l_len = size;
+ flock.l_pid = 0;
+ flock.l_owner.len = 0;
+
+ if (use_fd)
+ {
+ ec_finodelk(heal->fop->frame, heal->xl, heal->fop->mask,
+ EC_MINIMUM_ALL, NULL, NULL, heal->xl->name, heal->fd,
+ F_SETLKW, &flock, NULL);
+ }
+ else
+ {
+ ec_inodelk(heal->fop->frame, heal->xl, heal->fop->mask, EC_MINIMUM_ALL,
+ NULL, NULL, heal->xl->name, &heal->loc, F_SETLKW, &flock,
+ NULL);
+ }
+}
+
+void ec_heal_lookup(ec_heal_t * heal)
+{
+ dict_t * xdata;
+ int32_t error = ENOMEM;
+
+ xdata = dict_new();
+ if (xdata == NULL)
+ {
+ goto out;
+ }
+ if (dict_set_uint64(xdata, "list-xattr", 0) != 0)
+ {
+ goto out;
+ }
+
+ ec_lookup(heal->fop->frame, heal->xl, heal->fop->mask, EC_MINIMUM_MIN,
+ ec_heal_inode_lookup_cbk, heal, &heal->loc, xdata);
+
+ error = 0;
+
+out:
+ if (xdata != NULL)
+ {
+ dict_unref(xdata);
+ }
+
+ ec_fop_set_error(heal->fop, error);
+}
+
+void ec_heal_remove(ec_heal_t * heal, ec_cbk_data_t * cbk)
+{
+ if (cbk->iatt[0].ia_type == IA_IFDIR)
+ {
+ // TODO: Remove directory recursively ?
+ ec_rmdir(heal->fop->frame, heal->xl, cbk->mask, EC_MINIMUM_ONE,
+ ec_heal_rmdir_cbk, heal, &heal->loc, 0, NULL);
+ }
+ else
+ {
+ ec_unlink(heal->fop->frame, heal->xl, cbk->mask, EC_MINIMUM_ONE,
+ ec_heal_unlink_cbk, heal, &heal->loc, 0, NULL);
+ }
+}
+
+void ec_heal_remove_others(ec_heal_t * heal)
+{
+ struct list_head * item;
+ ec_cbk_data_t * cbk;
+
+ item = heal->lookup->cbk_list.next;
+ do
+ {
+ item = item->next;
+ cbk = list_entry(item, ec_cbk_data_t, list);
+
+ if (cbk->op_ret < 0)
+ {
+ if ((cbk->op_errno != ENOENT) && (cbk->op_errno != ENOTDIR))
+ {
+ gf_log(heal->xl->name, GF_LOG_WARNING, "Don't know how to "
+ "remove inode with "
+ "error %d",
+ cbk->op_errno);
+ }
+
+ ec_heal_exclude(heal, cbk->mask);
+
+ continue;
+ }
+
+ ec_heal_remove(heal, cbk);
+ } while (item->next != &heal->lookup->cbk_list);
+}
+
+void ec_heal_prepare_others(ec_heal_t * heal)
+{
+ struct list_head * item;
+ ec_cbk_data_t * cbk;
+
+ item = heal->lookup->cbk_list.next;
+ while (item->next != &heal->lookup->cbk_list)
+ {
+ item = item->next;
+ cbk = list_entry(item, ec_cbk_data_t, list);
+
+ if (cbk->op_ret < 0)
+ {
+ if (cbk->op_errno == ENOENT)
+ {
+ ec_heal_create(heal, cbk->mask, 1);
+ }
+ else
+ {
+ gf_log(heal->xl->name, GF_LOG_ERROR, "Don't know how to "
+ "heal error %d",
+ cbk->op_errno);
+
+ ec_heal_exclude(heal, cbk->mask);
+ }
+ }
+ else
+ {
+ if ((heal->iatt.ia_type != cbk->iatt[0].ia_type) ||
+ (uuid_compare(heal->iatt.ia_gfid, cbk->iatt[0].ia_gfid) != 0))
+ {
+ ec_heal_remove(heal, cbk);
+ }
+ }
+ }
+}
+
+int32_t ec_heal_readlink_cbk(call_frame_t * frame, void * cookie,
+ xlator_t * this, int32_t op_ret, int32_t op_errno,
+ const char * path, struct iatt * buf,
+ dict_t * xdata)
+{
+ ec_fop_data_t * fop = cookie;
+ ec_heal_t * heal = fop->data;
+
+ if (op_ret >= 0)
+ {
+ heal->symlink = gf_strdup(path);
+ if (heal->symlink != NULL)
+ {
+ ec_heal_prepare_others(heal);
+ }
+ else
+ {
+ ec_fop_set_error(fop, EIO);
+ }
+ }
+
+ return 0;
+}
+
+ec_cbk_data_t * ec_heal_lookup_check(ec_heal_t * heal, uintptr_t * pgood,
+ uintptr_t * pbad)
+{
+ ec_fop_data_t * fop = heal->lookup;
+ ec_cbk_data_t * cbk = NULL, * ans = NULL;
+ uintptr_t good = 0, bad = 0;
+
+ list_for_each_entry(ans, &fop->cbk_list, list)
+ {
+ if ((ans->op_ret < 0) && (ans->op_errno == ENOTCONN))
+ {
+ continue;
+ }
+
+ if (ans == fop->answer)
+ {
+ good |= ans->mask;
+ cbk = ans;
+ }
+ else
+ {
+ bad |= ans->mask;
+ }
+ }
+
+ *pgood = good;
+ *pbad = bad;
+
+ return cbk;
+}
+
+void ec_heal_prepare(ec_heal_t * heal)
+{
+ ec_cbk_data_t * cbk;
+ ec_fd_t * ctx;
+ int32_t error = ENOMEM;
+
+ heal->available = heal->good;
+
+ cbk = heal->lookup->answer;
+ if (cbk->op_ret < 0)
+ {
+ if ((cbk->op_errno == ENOENT) || (cbk->op_errno == ENOTDIR))
+ {
+ ec_heal_remove_others(heal);
+ }
+ else
+ {
+ gf_log(heal->xl->name, GF_LOG_ERROR, "Don't know how to heal "
+ "error %d",
+ cbk->op_errno);
+ }
+ }
+ else
+ {
+ if (heal->iatt.ia_type == IA_IFREG)
+ {
+ heal->fd = fd_create(heal->loc.inode, heal->fop->frame->root->pid);
+ if (heal->fd == NULL)
+ {
+ gf_log(heal->xl->name, GF_LOG_ERROR, "Unable to create a new "
+ "file descriptor");
+
+ goto out;
+ }
+ ctx = ec_fd_get(heal->fd, heal->xl);
+ if ((ctx == NULL) || (loc_copy(&ctx->loc, &heal->loc) != 0))
+ {
+ goto out;
+ }
+
+ ctx->flags = O_RDWR;
+ }
+
+ if (heal->iatt.ia_type == IA_IFLNK)
+ {
+ ec_readlink(heal->fop->frame, heal->xl, cbk->mask, EC_MINIMUM_ONE,
+ ec_heal_readlink_cbk, heal, &heal->loc,
+ heal->iatt.ia_size, NULL);
+ }
+ else
+ {
+ ec_heal_prepare_others(heal);
+ }
+ }
+
+ error = 0;
+
+out:
+ ec_fop_set_error(heal->fop, error);
+}
+
+int32_t ec_heal_open_others(ec_heal_t * heal)
+{
+ struct list_head * item;
+ ec_cbk_data_t * cbk;
+ uintptr_t mask = 0, open = heal->open;
+
+ item = heal->lookup->cbk_list.next;
+ while (item->next != &heal->lookup->cbk_list)
+ {
+ item = item->next;
+ cbk = list_entry(item, ec_cbk_data_t, list);
+
+ if ((cbk->op_ret < 0) || (cbk->iatt[0].ia_type != IA_IFREG) ||
+ (uuid_compare(heal->iatt.ia_gfid, cbk->iatt[0].ia_gfid) != 0))
+ {
+ ec_heal_exclude(heal, cbk->mask);
+ }
+ else
+ {
+ mask |= cbk->mask & ~heal->open;
+ }
+ }
+
+ if (mask != 0)
+ {
+ ec_open(heal->fop->frame, heal->xl, mask, EC_MINIMUM_ONE,
+ ec_heal_target_open_cbk, heal, &heal->loc, O_RDWR | O_TRUNC,
+ heal->fd, NULL);
+
+ open |= mask;
+ }
+
+ return (open != 0);
+}
+
+void ec_heal_setxattr_others(ec_heal_t * heal)
+{
+ ec_cbk_data_t * cbk;
+ dict_t * xdata;
+ int32_t error = ENOMEM;
+
+ if ((heal->good != 0) && (heal->bad != 0))
+ {
+ cbk = heal->lookup->answer;
+ xdata = cbk->xdata;
+
+ if ((cbk->iatt[0].ia_type == IA_IFREG) ||
+ (cbk->iatt[0].ia_type == IA_IFDIR))
+ {
+ if (ec_dict_set_number(xdata, EC_XATTR_VERSION, cbk->version) != 0)
+ {
+ goto out;
+ }
+ if (cbk->iatt[0].ia_type == IA_IFREG)
+ {
+ if (ec_dict_set_number(xdata, EC_XATTR_SIZE,
+ cbk->iatt[0].ia_size) != 0)
+ {
+ goto out;
+ }
+ }
+ }
+
+ ec_setxattr(heal->fop->frame, heal->xl, heal->bad, EC_MINIMUM_ONE,
+ ec_heal_setxattr_cbk, heal, &heal->loc, xdata, 0, NULL);
+ }
+
+ error = 0;
+
+out:
+ ec_fop_set_error(heal->fop, error);
+}
+
+int32_t ec_heal_xattr_clean(dict_t * dict, char * key, data_t * data,
+ void * arg)
+{
+ dict_t * base = arg;
+
+ if (dict_get(base, key) == NULL)
+ {
+ if (dict_set_static_bin(dict, key, dict, 0) != 0)
+ {
+ return -1;
+ }
+ }
+ else
+ {
+ dict_del(dict, key);
+ }
+
+ return 0;
+}
+
+void ec_heal_removexattr_others(ec_heal_t * heal)
+{
+ struct list_head * item;
+ ec_cbk_data_t * cbk;
+ dict_t * xdata;
+
+ if ((heal->good == 0) || (heal->bad == 0))
+ {
+ return;
+ }
+
+ xdata = heal->lookup->answer->xdata;
+ item = heal->lookup->cbk_list.next;
+ while (item->next != &heal->lookup->cbk_list)
+ {
+ item = item->next;
+ cbk = list_entry(item, ec_cbk_data_t, list);
+
+ if (cbk->op_ret >= 0)
+ {
+ if (dict_foreach(cbk->xdata, ec_heal_xattr_clean, xdata) == 0)
+ {
+ ec_removexattr(heal->fop->frame, heal->xl, cbk->mask,
+ EC_MINIMUM_ONE, ec_heal_removexattr_cbk, heal,
+ &heal->loc, "", cbk->xdata);
+ }
+ }
+ }
+}
+
+/* Copy the reference attributes (mode, owner, group, atime, mtime) from
+ * the healed iatt onto the bad subvolumes.  No-op if there is nothing to
+ * copy from or to. */
+void ec_heal_attr(ec_heal_t * heal)
+{
+    if ((heal->good != 0) && (heal->bad != 0))
+    {
+        ec_setattr(heal->fop->frame, heal->xl, heal->bad, EC_MINIMUM_ONE,
+                   ec_heal_setattr_cbk, heal, &heal->loc, &heal->iatt,
+                   GF_SET_ATTR_MODE | GF_SET_ATTR_UID | GF_SET_ATTR_GID |
+                   GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME, NULL);
+    }
+}
+
+/* Decide whether the file contents must be rebuilt.  Recomputes heal->bad
+ * as the mask of subvolumes whose reported size or version differ from the
+ * healed values, and returns non-zero when at least one such subvolume
+ * exists.  Only regular files on a healthy fop qualify. */
+int32_t ec_heal_needs_data_rebuild(ec_heal_t * heal)
+{
+    ec_fop_data_t * fop = heal->lookup;
+    ec_cbk_data_t * cbk = NULL;
+    uintptr_t bad = 0;
+
+    if ((heal->fop->error != 0) || (heal->good == 0) ||
+        (heal->iatt.ia_type != IA_IFREG))
+    {
+        return 0;
+    }
+
+    list_for_each_entry(cbk, &fop->cbk_list, list)
+    {
+        if ((cbk->op_ret >= 0) &&
+            ((cbk->size != heal->raw_size) || (cbk->version != heal->version)))
+        {
+            bad |= cbk->mask;
+        }
+    }
+
+    heal->bad = bad;
+
+    return (bad != 0);
+}
+
+/* Open the file for the data-rebuild phase: the bad targets are opened by
+ * ec_heal_open_others() (read/write, truncating), and the good sources are
+ * opened read-only here.  Skipped entirely when no data rebuild is
+ * needed. */
+void ec_heal_open(ec_heal_t * heal)
+{
+    if (!ec_heal_needs_data_rebuild(heal))
+    {
+        return;
+    }
+
+    if (ec_heal_open_others(heal))
+    {
+        ec_open(heal->fop->frame, heal->xl, heal->good, EC_MINIMUM_MIN,
+                ec_heal_source_open_cbk, heal, &heal->loc, O_RDONLY, heal->fd,
+                NULL);
+    }
+}
+
+/* Re-open, on the formerly bad subvolumes, every fd that the application
+ * already holds on the healed inode, so those fds become usable on the
+ * repaired copies.  O_TRUNC is stripped and write-only fds are promoted to
+ * O_RDWR before re-opening.
+ * NOTE(review): the inode lock is dropped around each ec_open()/ec_opendir()
+ * and re-taken afterwards while still iterating inode->fd_list; the list
+ * could change while unlocked -- confirm this is safe in context. */
+void ec_heal_reopen_fd(ec_heal_t * heal)
+{
+    inode_t * inode;
+    fd_t * fd;
+    ec_fd_t * ctx;
+    uintptr_t mask;
+    int32_t flags;
+
+    inode = heal->loc.inode;
+
+    LOCK(&inode->lock);
+
+    list_for_each_entry(fd, &inode->fd_list, inode_list)
+    {
+        ctx = ec_fd_get(fd, heal->xl);
+        if ((ctx != NULL) && (ctx->loc.inode != NULL))
+        {
+            /* Subvolumes that were healed but where this fd is not yet
+             * open. */
+            mask = heal->bad & ~ctx->open;
+            if (mask != 0)
+            {
+                UNLOCK(&inode->lock);
+
+                if (heal->iatt.ia_type == IA_IFDIR)
+                {
+                    ec_opendir(heal->fop->frame, heal->xl, mask,
+                               EC_MINIMUM_ONE, ec_heal_reopen_cbk, NULL,
+                               &heal->loc, fd, NULL);
+                }
+                else
+                {
+                    flags = ctx->flags & ~O_TRUNC;
+                    if ((flags & O_ACCMODE) == O_WRONLY)
+                    {
+                        flags &= ~O_ACCMODE;
+                        flags |= O_RDWR;
+                    }
+
+                    ec_open(heal->fop->frame, heal->xl, mask, EC_MINIMUM_ONE,
+                            ec_heal_reopen_cbk, NULL, &heal->loc, flags, fd,
+                            NULL);
+                }
+
+                LOCK(&inode->lock);
+            }
+        }
+    }
+
+    UNLOCK(&inode->lock);
+}
+
+/* Callback of the writes issued by ec_heal_readv_cbk(): just records the
+ * outcome into the heal bookkeeping (cookie is the ec_heal_t). */
+int32_t ec_heal_writev_cbk(call_frame_t * frame, void * cookie,
+                           xlator_t * this, int32_t op_ret, int32_t op_errno,
+                           struct iatt * prebuf, struct iatt * postbuf,
+                           dict_t * xdata)
+{
+    ec_trace("WRITE_CBK", cookie, "ret=%d, errno=%d", op_ret, op_errno);
+
+    ec_heal_update(cookie, 0);
+
+    return 0;
+}
+
+/* Callback of the read of one chunk from the good subvolumes.  On success
+ * the data is written onto the bad subvolumes at the same offset; a read
+ * of zero (or an error) marks the data copy as finished. */
+int32_t ec_heal_readv_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
+                          int32_t op_ret, int32_t op_errno,
+                          struct iovec * vector, int32_t count,
+                          struct iatt * stbuf, struct iobref * iobref,
+                          dict_t * xdata)
+{
+    ec_fop_data_t * fop = cookie;
+    ec_heal_t * heal = fop->data;
+
+    ec_trace("READ_CBK", fop, "ret=%d, errno=%d", op_ret, op_errno);
+
+    ec_heal_avoid(fop);
+
+    if (op_ret > 0)
+    {
+        ec_writev(heal->fop->frame, heal->xl, heal->bad, EC_MINIMUM_ONE,
+                  ec_heal_writev_cbk, heal, heal->fd, vector, count,
+                  heal->offset, 0, iobref, NULL);
+    }
+    else
+    {
+        /* End of file (or read failure): stop the copy loop. */
+        heal->done = 1;
+    }
+
+    return 0;
+}
+
+/* Copy one chunk of file data: read heal->size bytes at heal->offset from
+ * the good subvolumes; ec_heal_readv_cbk() forwards them to the bad ones.
+ * Only meaningful for regular files with both sources and targets. */
+void ec_heal_data(ec_heal_t * heal)
+{
+    ec_trace("DATA", heal->fop, "good=%lX, bad=%lX", heal->good, heal->bad);
+
+    if ((heal->good != 0) && (heal->bad != 0) &&
+        (heal->iatt.ia_type == IA_IFREG))
+    {
+        ec_readv(heal->fop->frame, heal->xl, heal->good, EC_MINIMUM_MIN,
+                 ec_heal_readv_cbk, heal, heal->fd, heal->size, heal->offset,
+                 0, NULL);
+    }
+}
+
+/* Final stage of a self-heal: detach the heal from the inode context,
+ * build the answer cbk carrying the availability masks (available, good,
+ * bad), release every resource owned by the heal structure and propagate
+ * any pending error to the fop.
+ *
+ * Fix: the closing ec_fop_set_error() previously dereferenced heal->fop
+ * after GF_FREE(heal) -- a use-after-free.  The fop pointer saved at entry
+ * is used instead. */
+void ec_heal_dispatch(ec_heal_t * heal)
+{
+    ec_fop_data_t * fop = heal->fop;
+    ec_cbk_data_t * cbk;
+    inode_t * inode;
+    ec_inode_t * ctx;
+    int32_t error;
+
+    inode = heal->loc.inode;
+
+    LOCK(&inode->lock);
+
+    /* Subvolumes found healthy are no longer 'bad' in the inode context. */
+    ctx = __ec_inode_get(inode, heal->xl);
+    if (ctx != NULL)
+    {
+        ctx->bad &= ~heal->good;
+        ctx->heal = NULL;
+    }
+
+    fop->data = NULL;
+
+    UNLOCK(&inode->lock);
+
+    error = fop->error;
+
+    cbk = ec_cbk_data_allocate(fop->frame, heal->xl, fop, fop->id, 0,
+                               error == 0 ? 0 : -1, error);
+    if (cbk != NULL)
+    {
+        cbk->uintptr[0] = heal->available;
+        cbk->uintptr[1] = heal->good;
+        cbk->uintptr[2] = heal->bad;
+
+        ec_combine(cbk, NULL);
+
+        fop->answer = cbk;
+    }
+    else if (error == 0)
+    {
+        error = ENOMEM;
+    }
+
+    if (heal->lookup != NULL)
+    {
+        ec_fop_data_release(heal->lookup);
+    }
+    if (heal->fd != NULL)
+    {
+        fd_unref(heal->fd);
+    }
+    GF_FREE(heal->symlink);
+    loc_wipe(&heal->loc);
+
+    LOCK_DESTROY(&heal->lock);
+
+    GF_FREE(heal);
+
+    /* Use the saved pointer: 'heal' has just been freed. */
+    ec_fop_set_error(fop, error);
+}
+
+/* Per-subvolume "wind" of the heal fop.  Heal does not actually contact
+ * the child here: it synthesizes a cbk with the current availability masks
+ * and combines it, then completes the wind. */
+void ec_wind_heal(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+    ec_cbk_data_t * cbk;
+    ec_heal_t * heal = fop->data;
+
+    ec_trace("WIND", fop, "idx=%d", idx);
+
+    cbk = ec_cbk_data_allocate(fop->req_frame, fop->xl, fop, EC_FOP_HEAL, idx,
+                               fop->error == 0 ? 0 : -1, fop->error);
+    if (cbk != NULL)
+    {
+        cbk->uintptr[0] = heal->available;
+        cbk->uintptr[1] = heal->good;
+        cbk->uintptr[2] = heal->bad;
+
+        ec_combine(cbk, NULL);
+    }
+
+    ec_complete(fop);
+}
+
+/* State machine driving a full self-heal.  Positive states are the normal
+ * forward path; a negative state is entered when the corresponding
+ * positive state failed, and is used to unwind locks before reporting.
+ * Overall flow: entry lock -> entry lookup/prepare -> inode lock ->
+ * lookup -> fix xattrs/attrs -> open fds -> unlock -> (optional data
+ * rebuild loop under range inodelks) -> final setattr -> dispatch answer.
+ * Returns the next state to run. */
+int32_t ec_manager_heal(ec_fop_data_t * fop, int32_t state)
+{
+    ec_cbk_data_t * cbk;
+    ec_heal_t * heal = fop->data;
+
+    switch (state)
+    {
+        case EC_STATE_INIT:
+            ec_owner_set(fop->frame, fop->frame->root);
+
+            fop->error = ec_heal_init(fop);
+            if (fop->error != 0)
+            {
+                return EC_STATE_REPORT;
+            }
+
+        /* fallthrough */
+        case EC_STATE_DISPATCH:
+            ec_heal_entrylk(fop->data, ENTRYLK_LOCK);
+
+            return EC_STATE_HEAL_ENTRY_LOOKUP;
+
+        case EC_STATE_HEAL_ENTRY_LOOKUP:
+            ec_lookup(fop->frame, heal->xl, fop->mask, EC_MINIMUM_MIN,
+                      ec_heal_entry_lookup_cbk, heal, &heal->loc, NULL);
+
+            return EC_STATE_HEAL_ENTRY_PREPARE;
+
+        case EC_STATE_HEAL_ENTRY_PREPARE:
+            ec_heal_prepare(heal);
+
+            return EC_STATE_HEAL_PRE_INODELK_LOCK;
+
+        case EC_STATE_HEAL_PRE_INODELK_LOCK:
+            ec_heal_inodelk(heal, F_WRLCK, 0, 0, 0);
+
+            return EC_STATE_HEAL_PRE_INODE_LOOKUP;
+
+        case EC_STATE_HEAL_PRE_INODE_LOOKUP:
+            ec_heal_lookup(heal);
+
+            return EC_STATE_HEAL_XATTRIBUTES_REMOVE;
+
+        case EC_STATE_HEAL_XATTRIBUTES_REMOVE:
+            ec_heal_removexattr_others(heal);
+
+            return EC_STATE_HEAL_XATTRIBUTES_SET;
+
+        case EC_STATE_HEAL_XATTRIBUTES_SET:
+            ec_heal_setxattr_others(heal);
+
+            return EC_STATE_HEAL_ATTRIBUTES;
+
+        case EC_STATE_HEAL_ATTRIBUTES:
+            ec_heal_attr(heal);
+
+            return EC_STATE_HEAL_OPEN;
+
+        case EC_STATE_HEAL_OPEN:
+            ec_heal_open(heal);
+
+            return EC_STATE_HEAL_REOPEN_FD;
+
+        case EC_STATE_HEAL_REOPEN_FD:
+            ec_heal_reopen_fd(heal);
+
+            return EC_STATE_HEAL_UNLOCK;
+
+        /* Failures between the inodelk and the unlock fall through here to
+         * release both locks before deciding what to do next. */
+        case -EC_STATE_HEAL_XATTRIBUTES_REMOVE:
+        case -EC_STATE_HEAL_XATTRIBUTES_SET:
+        case -EC_STATE_HEAL_ATTRIBUTES:
+        case -EC_STATE_HEAL_OPEN:
+        case -EC_STATE_HEAL_REOPEN_FD:
+        case -EC_STATE_HEAL_UNLOCK:
+        case EC_STATE_HEAL_UNLOCK:
+            ec_heal_inodelk(heal, F_UNLCK, 0, 0, 0);
+
+        /* fallthrough */
+        case -EC_STATE_HEAL_ENTRY_PREPARE:
+        case -EC_STATE_HEAL_PRE_INODELK_LOCK:
+        case -EC_STATE_HEAL_PRE_INODE_LOOKUP:
+            ec_heal_entrylk(heal, ENTRYLK_UNLOCK);
+
+            if (ec_heal_needs_data_rebuild(heal))
+            {
+                return EC_STATE_HEAL_DATA_LOCK;
+            }
+
+            return EC_STATE_HEAL_DISPATCH;
+
+        /* Data rebuild loop: lock a range, copy it, unlock, advance. */
+        case EC_STATE_HEAL_DATA_LOCK:
+            if (heal->done)
+            {
+                return EC_STATE_HEAL_POST_INODELK_LOCK;
+            }
+
+            ec_heal_inodelk(heal, F_WRLCK, 1, heal->offset, heal->size);
+
+            return EC_STATE_HEAL_DATA_COPY;
+
+        case EC_STATE_HEAL_DATA_COPY:
+            ec_heal_data(heal);
+
+            return EC_STATE_HEAL_DATA_UNLOCK;
+
+        case -EC_STATE_HEAL_DATA_COPY:
+        case -EC_STATE_HEAL_DATA_UNLOCK:
+        case EC_STATE_HEAL_DATA_UNLOCK:
+            ec_heal_inodelk(heal, F_UNLCK, 1, heal->offset, heal->size);
+
+            heal->offset += heal->size;
+
+            return EC_STATE_HEAL_DATA_LOCK;
+
+        case EC_STATE_HEAL_POST_INODELK_LOCK:
+            ec_heal_inodelk(heal, F_WRLCK, 1, 0, 0);
+
+            return EC_STATE_HEAL_POST_INODE_LOOKUP;
+
+        case EC_STATE_HEAL_POST_INODE_LOOKUP:
+            ec_heal_lookup(heal);
+
+            return EC_STATE_HEAL_SETATTR;
+
+        case EC_STATE_HEAL_SETATTR:
+            ec_setattr(heal->fop->frame, heal->xl, heal->bad, EC_MINIMUM_ONE,
+                       ec_heal_setattr_cbk, heal, &heal->loc, &heal->iatt,
+                       GF_SET_ATTR_MODE | GF_SET_ATTR_UID | GF_SET_ATTR_GID |
+                       GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME, NULL);
+
+            return EC_STATE_HEAL_POST_INODELK_UNLOCK;
+
+        case -EC_STATE_HEAL_SETATTR:
+        case -EC_STATE_HEAL_POST_INODELK_UNLOCK:
+        case EC_STATE_HEAL_POST_INODELK_UNLOCK:
+            ec_heal_inodelk(heal, F_UNLCK, 1, 0, 0);
+
+            return EC_STATE_HEAL_DISPATCH;
+
+        case -EC_STATE_HEAL_POST_INODELK_LOCK:
+        case -EC_STATE_HEAL_POST_INODE_LOOKUP:
+        case -EC_STATE_HEAL_ENTRY_LOOKUP:
+        case -EC_STATE_HEAL_DATA_LOCK:
+        case -EC_STATE_HEAL_DISPATCH:
+        case EC_STATE_HEAL_DISPATCH:
+            /* ec_heal_dispatch() frees 'heal'; it must not be used after
+             * this point. */
+            ec_heal_dispatch(heal);
+
+            return EC_STATE_PREPARE_ANSWER;
+
+        case EC_STATE_PREPARE_ANSWER:
+            cbk = fop->answer;
+            if (cbk != NULL)
+            {
+                if (!ec_dict_combine(cbk, EC_COMBINE_XDATA))
+                {
+                    if (cbk->op_ret >= 0)
+                    {
+                        cbk->op_ret = -1;
+                        cbk->op_errno = EIO;
+                    }
+                }
+                if (cbk->op_ret < 0)
+                {
+                    ec_fop_set_error(fop, cbk->op_errno);
+                }
+            }
+            else
+            {
+                ec_fop_set_error(fop, EIO);
+            }
+
+            return EC_STATE_REPORT;
+
+        case EC_STATE_REPORT:
+            cbk = fop->answer;
+
+            GF_ASSERT(cbk != NULL);
+
+            /* fd == NULL means this was a heal() call, otherwise fheal(). */
+            if (fop->fd == NULL)
+            {
+                if (fop->cbks.heal != NULL)
+                {
+                    fop->cbks.heal(fop->req_frame, fop, fop->xl, cbk->op_ret,
+                                   cbk->op_errno, cbk->uintptr[0],
+                                   cbk->uintptr[1], cbk->uintptr[2],
+                                   cbk->xdata);
+                }
+            }
+            else
+            {
+                if (fop->cbks.fheal != NULL)
+                {
+                    fop->cbks.fheal(fop->req_frame, fop, fop->xl, cbk->op_ret,
+                                    cbk->op_errno, cbk->uintptr[0],
+                                    cbk->uintptr[1], cbk->uintptr[2],
+                                    cbk->xdata);
+                }
+            }
+
+            return EC_STATE_END;
+
+        case -EC_STATE_DISPATCH:
+        case -EC_STATE_PREPARE_ANSWER:
+        case -EC_STATE_REPORT:
+            GF_ASSERT(fop->error != 0);
+
+            if (fop->fd == NULL)
+            {
+                if (fop->cbks.heal != NULL)
+                {
+                    fop->cbks.heal(fop->req_frame, fop, fop->xl, -1,
+                                   fop->error, 0, 0, 0, NULL);
+                }
+            }
+            else
+            {
+                if (fop->cbks.fheal != NULL)
+                {
+                    fop->cbks.fheal(fop->req_frame, fop, fop->xl, -1,
+                                    fop->error, 0, 0, 0, NULL);
+                }
+            }
+
+            return EC_STATE_END;
+
+        default:
+            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
+                   state, ec_fop_name(fop->id));
+
+            return EC_STATE_END;
+    }
+}
+
+/* Entry point of the heal fop.  Allocates the fop data, copies the
+ * location and xdata, and hands control to the heal state machine.  On an
+ * early failure where no fop could be allocated, the callback (if any) is
+ * invoked directly with EIO.
+ *
+ * Fix: 'func' is now NULL-checked before being invoked on the failure
+ * path, matching the guards used everywhere else for fop->cbks.heal. */
+void ec_heal(call_frame_t * frame, xlator_t * this, uintptr_t target,
+             int32_t minimum, fop_heal_cbk_t func, void * data, loc_t * loc,
+             dict_t * xdata)
+{
+    ec_cbk_t callback = { .heal = func };
+    ec_fop_data_t * fop = NULL;
+    int32_t error = EIO;
+
+    gf_log("ec", GF_LOG_TRACE, "EC(HEAL) %p", frame);
+
+    VALIDATE_OR_GOTO(this, out);
+    GF_VALIDATE_OR_GOTO(this->name, frame, out);
+    GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+    fop = ec_fop_data_allocate(NULL, this, EC_FOP_HEAL,
+                               EC_FLAG_UPDATE_LOC_INODE, target, minimum,
+                               ec_wind_heal, ec_manager_heal, callback, data);
+    if (fop == NULL)
+    {
+        goto out;
+    }
+
+    if (loc != NULL)
+    {
+        if (loc_copy(&fop->loc[0], loc) != 0)
+        {
+            gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");
+
+            goto out;
+        }
+    }
+    if (xdata != NULL)
+    {
+        fop->xdata = dict_ref(xdata);
+        if (fop->xdata == NULL)
+        {
+            gf_log(this->name, GF_LOG_ERROR, "Failed to reference a "
+                                             "dictionary.");
+
+            goto out;
+        }
+    }
+
+    error = 0;
+
+out:
+    if (fop != NULL)
+    {
+        /* The manager reports errors through the callback itself. */
+        ec_manager(fop, error);
+    }
+    else if (func != NULL)
+    {
+        func(frame, NULL, this, -1, EIO, 0, 0, 0, NULL);
+    }
+}
+
+/* FOP: fheal */
+
+/* Per-subvolume "wind" of the fheal fop.  Like ec_wind_heal(): no network
+ * call, it just synthesizes and combines a cbk carrying the availability
+ * masks, then completes the wind. */
+void ec_wind_fheal(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+    ec_cbk_data_t * cbk;
+    ec_heal_t * heal = fop->data;
+
+    ec_trace("WIND", fop, "idx=%d", idx);
+
+    cbk = ec_cbk_data_allocate(fop->req_frame, fop->xl, fop, EC_FOP_FHEAL, idx,
+                               fop->error == 0 ? 0 : -1, fop->error);
+    if (cbk != NULL)
+    {
+        cbk->uintptr[0] = heal->available;
+        cbk->uintptr[1] = heal->good;
+        cbk->uintptr[2] = heal->bad;
+
+        ec_combine(cbk, NULL);
+    }
+
+    ec_complete(fop);
+}
+
+/* fd-based heal: recovers the loc stored in the fd context and delegates
+ * to ec_heal().  NOTE(review): if the fd has no usable context the call
+ * silently does nothing and 'func' is never invoked -- confirm callers
+ * tolerate that. */
+void ec_fheal(call_frame_t * frame, xlator_t * this, uintptr_t target,
+              int32_t minimum, fop_fheal_cbk_t func, void * data, fd_t * fd,
+              dict_t * xdata)
+{
+    ec_fd_t * ctx = ec_fd_get(fd, this);
+
+    if ((ctx != NULL) && (ctx->loc.inode != NULL))
+    {
+        gf_log("ec", GF_LOG_DEBUG, "FHEAL ctx: flags=%X, open=%lX, bad=%lX",
+               ctx->flags, ctx->open, ctx->bad);
+        ec_heal(frame, this, target, minimum, func, data, &ctx->loc, xdata);
+    }
+}
diff --git a/xlators/cluster/ec/src/ec-helpers.c b/xlators/cluster/ec/src/ec-helpers.c
new file mode 100644
index 00000000000..771faf5b013
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-helpers.c
@@ -0,0 +1,594 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <libgen.h>
+
+#include "byte-order.h"
+
+#include "ec-mem-types.h"
+#include "ec-fops.h"
+#include "ec-helpers.h"
+
+/* Bit-layout constants used to multiplex the subvolume index into the
+ * directory offsets (d_off) handed to clients; see ec_itransform() and
+ * ec_deitransform(). */
+#define BACKEND_D_OFF_BITS 63
+#define PRESENT_D_OFF_BITS 63
+
+#define ONE 1ULL
+#define MASK (~0ULL)
+#define PRESENT_MASK (MASK >> (64 - PRESENT_D_OFF_BITS))
+#define BACKEND_MASK (MASK >> (64 - BACKEND_D_OFF_BITS))
+
+#define TOP_BIT (ONE << (PRESENT_D_OFF_BITS - 1))
+#define SHIFT_BITS (max(0, (BACKEND_D_OFF_BITS - PRESENT_D_OFF_BITS + 1)))
+
+/* Names for EC-private fops, which use negative ids (indexed here by the
+ * negated id); standard fops use gf_fop_list.  See ec_fop_name(). */
+static const char * ec_fop_list[] =
+{
+    [-EC_FOP_HEAL] = "HEAL"
+};
+
+/* Render 'value' in binary into the tail of the caller-supplied buffer
+ * 'str' of 'size' bytes, emitting at least 'digits' digits (zero-padded).
+ * The string is built right to left; the returned pointer is somewhere
+ * inside 'str', or a static error string if the buffer is too small. */
+const char * ec_bin(char * str, size_t size, uint64_t value, int32_t digits)
+{
+    str += size;
+
+    /* Reserve room for the NUL terminator. */
+    if (size-- < 1)
+    {
+        goto failed;
+    }
+    *--str = 0;
+
+    while ((value != 0) || (digits > 0))
+    {
+        if (size-- < 1)
+        {
+            goto failed;
+        }
+        *--str = '0' + (value & 1);
+        digits--;
+        value >>= 1;
+    }
+
+    return str;
+
+failed:
+    return "<buffer too small>";
+}
+
+/* Human-readable name of a fop id: standard ids come from gf_fop_list,
+ * EC-private (negative) ids from ec_fop_list. */
+const char * ec_fop_name(int32_t id)
+{
+    if (id >= 0)
+    {
+        return gf_fop_list[id];
+    }
+
+    return ec_fop_list[-id];
+}
+
+/* Emit a TRACE-level log line describing the current state of a fop
+ * (refcounts, wind/job counters, masks in binary) plus a printf-style
+ * message.  Uses the GNU vasprintf() extension; when allocation fails a
+ * static placeholder is logged and, accordingly, free() is only called
+ * when vasprintf() succeeded. */
+void ec_trace(const char * event, ec_fop_data_t * fop, const char * fmt, ...)
+{
+    char str1[32], str2[32], str3[32];
+    char * msg;
+    ec_t * ec = fop->xl->private;
+    va_list args;
+    int32_t ret;
+
+    va_start(args, fmt);
+    ret = vasprintf(&msg, fmt, args);
+    va_end(args);
+
+    if (ret < 0)
+    {
+        msg = "<memory allocation error>";
+    }
+
+    gf_log("ec", GF_LOG_TRACE, "%s(%s) %p(%p) [refs=%d, winds=%d, jobs=%d] "
+                               "frame=%p/%p, min/exp=%d/%d, err=%d state=%d "
+                               "{%s:%s:%s} %s",
+           event, ec_fop_name(fop->id), fop, fop->parent, fop->refs,
+           fop->winds, fop->jobs, fop->req_frame, fop->frame, fop->minimum,
+           fop->expected, fop->error, fop->state,
+           ec_bin(str1, sizeof(str1), fop->mask, ec->nodes),
+           ec_bin(str2, sizeof(str2), fop->remaining, ec->nodes),
+           ec_bin(str3, sizeof(str3), fop->bad, ec->nodes), msg);
+
+    if (ret >= 0)
+    {
+        free(msg);
+    }
+}
+
+/* Encode a subvolume index into a directory offset so it can later be
+ * recovered by ec_deitransform().  Small offsets are multiplexed as
+ * offset * nodes + idx; offsets too large for that encoding fall back to a
+ * TOP_BIT-tagged form that stores idx in the low bits.  -1 (EOF marker) is
+ * passed through unchanged. */
+uint64_t ec_itransform(ec_t * ec, int32_t idx, uint64_t offset)
+{
+    int32_t bits;
+
+    if (offset == -1ULL)
+    {
+        return -1ULL;
+    }
+
+    bits = ec->bits_for_nodes;
+    if ((offset & ~(PRESENT_MASK >> (bits + 1))) != 0)
+    {
+        return TOP_BIT | ((offset >> SHIFT_BITS) & (MASK << bits)) | idx;
+    }
+
+    return (offset * ec->nodes) + idx;
+}
+
+/* Inverse of ec_itransform(): extract the subvolume index into *idx and
+ * return the original backend offset, handling both the multiplexed and
+ * the TOP_BIT-tagged encodings. */
+uint64_t ec_deitransform(ec_t * ec, int32_t * idx, uint64_t offset)
+{
+    uint64_t mask = 0;
+
+    if ((offset & TOP_BIT) != 0)
+    {
+        mask = MASK << ec->bits_for_nodes;
+
+        *idx = offset & ~mask;
+        return ((offset & ~TOP_BIT) & mask) << SHIFT_BITS;
+    }
+
+    *idx = offset % ec->nodes;
+
+    return offset / ec->nodes;
+}
+
+/* Population count (number of set bits) of a 64-bit value, using the
+ * classic branch-free SWAR reduction. */
+int32_t ec_bits_count(uint64_t n)
+{
+    n -= (n >> 1) & 0x5555555555555555ULL;
+    n = ((n >> 2) & 0x3333333333333333ULL) + (n & 0x3333333333333333ULL);
+    n = (n + (n >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
+    n += n >> 8;
+    n += n >> 16;
+    n += n >> 32;
+
+    return n & 0xFF;
+}
+
+/* Index of the lowest set bit of n (0-based), or -1 if n is 0. */
+int32_t ec_bits_index(uint64_t n)
+{
+    return ffsll(n) - 1;
+}
+
+/* Clear the lowest set bit of *n and return its 0-based index (-1 if *n
+ * was 0).  'tmp &= -tmp' isolates the lowest set bit. */
+int32_t ec_bits_consume(uint64_t * n)
+{
+    uint64_t tmp;
+
+    tmp = *n;
+    tmp &= -tmp;
+    *n ^= tmp;
+
+    return ffsll(tmp) - 1;
+}
+
+/* Copy up to 'size' bytes into 'dst' from an iovec array, starting at
+ * logical byte 'offset' within the concatenated vector.  Returns the
+ * number of bytes actually copied (less than 'size' if the vector is
+ * exhausted first). */
+size_t ec_iov_copy_to(void * dst, struct iovec * vector, int32_t count,
+                      off_t offset, size_t size)
+{
+    int32_t i = 0;
+    size_t total = 0, len = 0;
+
+    /* Skip whole iovec entries until 'offset' falls inside one. */
+    while (i < count)
+    {
+        if (offset < vector[i].iov_len)
+        {
+            while ((i < count) && (size > 0))
+            {
+                len = size;
+                if (len > vector[i].iov_len - offset)
+                {
+                    len = vector[i].iov_len - offset;
+                }
+                memcpy(dst, vector[i++].iov_base + offset, len);
+                offset = 0;
+                dst += len;
+                total += len;
+                size -= len;
+            }
+
+            break;
+        }
+
+        offset -= vector[i].iov_len;
+        i++;
+    }
+
+    return total;
+}
+
+/* Store a 64-bit number in a dict in network byte order under 'key'.
+ * Returns 0 on success, -1 on allocation or dict failure.
+ *
+ * Fix: the allocated buffer was leaked when dict_set_bin() failed, since
+ * on failure the dict does not take ownership of it; it is now freed. */
+int32_t ec_dict_set_number(dict_t * dict, char * key, uint64_t value)
+{
+    uint64_t * ptr;
+
+    ptr = GF_MALLOC(sizeof(value), gf_common_mt_char);
+    if (ptr == NULL)
+    {
+        return -1;
+    }
+
+    *ptr = hton64(value);
+
+    if (dict_set_bin(dict, key, ptr, sizeof(value)) != 0)
+    {
+        GF_FREE(ptr);
+
+        return -1;
+    }
+
+    return 0;
+}
+
+/* Fetch a 64-bit network-order number stored under 'key', convert it to
+ * host order into *value and delete the entry.  Returns -1 when the key is
+ * missing or the stored value has the wrong length. */
+int32_t ec_dict_del_number(dict_t * dict, char * key, uint64_t * value)
+{
+    void * ptr;
+    int32_t len;
+
+    if ((dict == NULL) || (dict_get_ptr_and_len(dict, key, &ptr, &len) != 0) ||
+        (len != sizeof(uint64_t)))
+    {
+        return -1;
+    }
+
+    *value = ntoh64(*(uint64_t *)ptr);
+
+    dict_del(dict, key);
+
+    return 0;
+}
+
+/* Reconcile two GFIDs: if src is null there is nothing to check; if dst is
+ * null it inherits src.  Returns 1 when consistent (possibly after
+ * copying), 0 when they differ. */
+int32_t ec_loc_gfid_check(xlator_t * xl, uuid_t dst, uuid_t src)
+{
+    if (uuid_is_null(src))
+    {
+        return 1;
+    }
+
+    if (uuid_is_null(dst))
+    {
+        uuid_copy(dst, src);
+
+        return 1;
+    }
+
+    if (uuid_compare(dst, src) != 0)
+    {
+        gf_log(xl->name, GF_LOG_WARNING, "Mismatching GFID's in loc");
+
+        return 0;
+    }
+
+    return 1;
+}
+
+/* Build a loc_t for the parent of 'loc' into 'parent' and optionally
+ * return the basename through 'name' (caller frees it).  For the root
+ * ("/") the parent is the root itself.  Returns 0 on success or an errno
+ * (EINVAL/ENOMEM).
+ *
+ * The local variable 'parent' is set to NULL when the result should be
+ * kept for the caller (gfid check passed or no inode), so the cleanup
+ * loc_wipe() only runs when the result must be discarded.
+ * NOTE(review): when the gfid check fails, 'parent' is wiped but 'error'
+ * is still 0, so the caller receives success with an empty parent, and a
+ * previously allocated *name is not released -- confirm whether this is
+ * intended. */
+int32_t ec_loc_parent(xlator_t * xl, loc_t * loc, loc_t * parent, char ** name)
+{
+    char * str = NULL;
+    int32_t error = 0;
+
+    memset(parent, 0, sizeof(loc_t));
+
+    if (loc->path == NULL)
+    {
+        gf_log(xl->name, GF_LOG_ERROR, "inode path missing in loc_t: %p", loc->parent);
+
+        return EINVAL;
+    }
+
+    if (loc->parent == NULL)
+    {
+        /* Only the root may legitimately lack a parent inode. */
+        if ((loc->inode == NULL) || !__is_root_gfid(loc->inode->gfid) ||
+            (strcmp(loc->path, "/") != 0))
+        {
+            gf_log(xl->name, GF_LOG_ERROR, "Parent inode missing for "
+                                           "loc_t (path=%s, name=%s)",
+                   loc->path, loc->name);
+
+            return EINVAL;
+        }
+
+        if (loc_copy(parent, loc) != 0)
+        {
+            return ENOMEM;
+        }
+
+        parent->name = NULL;
+
+        if (name != NULL)
+        {
+            *name = NULL;
+        }
+    }
+    else
+    {
+        if (uuid_is_null(loc->parent->gfid) && (uuid_is_null(loc->pargfid)))
+        {
+            gf_log(xl->name, GF_LOG_ERROR, "Invalid parent inode "
+                                           "(path=%s, name=%s)",
+                   loc->path, loc->name);
+
+            return EINVAL;
+        }
+        uuid_copy(parent->gfid, loc->pargfid);
+
+        /* basename()/dirname() may modify their argument, so work on a
+         * private copy of the path. */
+        str = gf_strdup(loc->path);
+        if (str == NULL)
+        {
+            gf_log(xl->name, GF_LOG_ERROR, "Unable to duplicate path "
+                                           "'%s'", str);
+
+            return ENOMEM;
+        }
+        if (name != NULL)
+        {
+            *name = gf_strdup(basename(str));
+            if (*name == NULL)
+            {
+                gf_log(xl->name, GF_LOG_ERROR, "Unable to get basename "
+                                               "of '%s'", str);
+
+                error = ENOMEM;
+
+                goto out;
+            }
+            /* Restore the copy: basename() may have modified it. */
+            strcpy(str, loc->path);
+        }
+        parent->path = gf_strdup(dirname(str));
+        if (parent->path == NULL)
+        {
+            gf_log(xl->name, GF_LOG_ERROR, "Unable to get dirname of "
+                                           "'%s'", str);
+
+            error = ENOMEM;
+
+            goto out;
+        }
+        parent->name = strrchr(parent->path, '/');
+        if (parent->name == NULL)
+        {
+            gf_log(xl->name, GF_LOG_ERROR, "Invalid path name (%s)",
+                   parent->path);
+
+            error = EINVAL;
+
+            goto out;
+        }
+        parent->name++;
+        parent->inode = inode_ref(loc->parent);
+    }
+
+    if ((loc->inode == NULL) ||
+        ec_loc_gfid_check(xl, loc->gfid, loc->inode->gfid))
+    {
+        parent = NULL;
+    }
+
+out:
+    GF_FREE(str);
+
+    if (parent != NULL)
+    {
+        loc_wipe(parent);
+    }
+
+    return error;
+}
+
+/* Normalize a loc_t: adopt 'inode' (replacing any previous reference) and
+ * make sure loc->gfid / loc->pargfid are consistent with the inode, the
+ * optional iatt and the parent inode.  Returns 1 on success, 0 on any GFID
+ * mismatch. */
+int32_t ec_loc_prepare(xlator_t * xl, loc_t * loc, inode_t * inode,
+                       struct iatt * iatt)
+{
+    if ((inode != NULL) && (loc->inode != inode))
+    {
+        if (loc->inode != NULL)
+        {
+            inode_unref(loc->inode);
+        }
+        loc->inode = inode_ref(inode);
+
+        uuid_copy(loc->gfid, inode->gfid);
+    }
+    else if (loc->inode != NULL)
+    {
+        if (!ec_loc_gfid_check(xl, loc->gfid, loc->inode->gfid))
+        {
+            return 0;
+        }
+    }
+
+    if (iatt != NULL)
+    {
+        if (!ec_loc_gfid_check(xl, loc->gfid, iatt->ia_gfid))
+        {
+            return 0;
+        }
+    }
+
+    if (loc->parent != NULL)
+    {
+        if (!ec_loc_gfid_check(xl, loc->pargfid, loc->parent->gfid))
+        {
+            return 0;
+        }
+
+    }
+
+    /* Not fatal: some callers can proceed without a GFID. */
+    if (uuid_is_null(loc->gfid))
+    {
+        gf_log(xl->name, GF_LOG_WARNING, "GFID not available for inode");
+    }
+
+    return 1;
+}
+
+/* Build a loc_t from an fd, preferring the loc saved in the fd context and
+ * falling back to the fd's inode.  Returns 1 on success; on failure the
+ * loc is left wiped and 0 is returned. */
+int32_t ec_loc_from_fd(xlator_t * xl, loc_t * loc, fd_t * fd)
+{
+    ec_fd_t * ctx;
+
+    memset(loc, 0, sizeof(*loc));
+
+    ctx = ec_fd_get(fd, xl);
+    if (ctx != NULL)
+    {
+        if (loc_copy(loc, &ctx->loc) != 0)
+        {
+            return 0;
+        }
+    }
+
+    if (ec_loc_prepare(xl, loc, fd->inode, NULL))
+    {
+        return 1;
+    }
+
+    loc_wipe(loc);
+
+    return 0;
+}
+
+/* Copy 'src' into 'dst' and normalize it with ec_loc_prepare().  Returns 1
+ * on success; on failure 'dst' is left wiped and 0 is returned. */
+int32_t ec_loc_from_loc(xlator_t * xl, loc_t * dst, loc_t * src)
+{
+    memset(dst, 0, sizeof(*dst));
+
+    if (loc_copy(dst, src) != 0)
+    {
+        return 0;
+    }
+
+    if (ec_loc_prepare(xl, dst, NULL, NULL))
+    {
+        return 1;
+    }
+
+    loc_wipe(dst);
+
+    return 0;
+}
+
+/* Set the lock owner of a frame from an arbitrary pointer value. */
+void ec_owner_set(call_frame_t * frame, void * owner)
+{
+    set_lk_owner_from_ptr(&frame->root->lk_owner, owner);
+}
+
+/* Copy an existing lock owner (length + data) onto a frame. */
+void ec_owner_copy(call_frame_t * frame, gf_lkowner_t * owner)
+{
+    frame->root->lk_owner.len = owner->len;
+    memcpy(frame->root->lk_owner.data, owner->data, owner->len);
+}
+
+/* Return the EC context of an inode, creating and attaching a
+ * zero-initialized one if absent.  Caller must hold inode->lock (hence the
+ * double-underscore prefix).  Returns NULL on allocation or ctx-set
+ * failure. */
+ec_inode_t * __ec_inode_get(inode_t * inode, xlator_t * xl)
+{
+    ec_inode_t * ctx = NULL;
+    uint64_t value = 0;
+
+    if ((__inode_ctx_get(inode, xl, &value) != 0) || (value == 0))
+    {
+        ctx = GF_MALLOC(sizeof(*ctx), ec_mt_ec_inode_t);
+        if (ctx != NULL)
+        {
+            memset(ctx, 0, sizeof(*ctx));
+
+            /* The pointer is stored in the inode ctx as a uint64_t. */
+            value = (uint64_t)(uintptr_t)ctx;
+            if (__inode_ctx_set(inode, xl, &value) != 0)
+            {
+                GF_FREE(ctx);
+
+                return NULL;
+            }
+        }
+    }
+    else
+    {
+        ctx = (ec_inode_t *)(uintptr_t)value;
+    }
+
+    return ctx;
+}
+
+/* Locked wrapper around __ec_inode_get(). */
+ec_inode_t * ec_inode_get(inode_t * inode, xlator_t * xl)
+{
+    ec_inode_t * ctx = NULL;
+
+    LOCK(&inode->lock);
+
+    ctx = __ec_inode_get(inode, xl);
+
+    UNLOCK(&inode->lock);
+
+    return ctx;
+}
+
+/* Return the EC context of an fd, creating and attaching a
+ * zero-initialized one if absent.  Caller must hold fd->lock.  Returns
+ * NULL on allocation or ctx-set failure. */
+ec_fd_t * __ec_fd_get(fd_t * fd, xlator_t * xl)
+{
+    ec_fd_t * ctx = NULL;
+    uint64_t value = 0;
+
+    if ((__fd_ctx_get(fd, xl, &value) != 0) || (value == 0))
+    {
+        ctx = GF_MALLOC(sizeof(*ctx), ec_mt_ec_fd_t);
+        if (ctx != NULL)
+        {
+            memset(ctx, 0, sizeof(*ctx));
+
+            value = (uint64_t)(uintptr_t)ctx;
+            if (__fd_ctx_set(fd, xl, value) != 0)
+            {
+                GF_FREE(ctx);
+
+                return NULL;
+            }
+        }
+    }
+    else
+    {
+        ctx = (ec_fd_t *)(uintptr_t)value;
+    }
+
+    return ctx;
+}
+
+/* Locked wrapper around __ec_fd_get(). */
+ec_fd_t * ec_fd_get(fd_t * fd, xlator_t * xl)
+{
+    ec_fd_t * ctx = NULL;
+
+    LOCK(&fd->lock);
+
+    ctx = __ec_fd_get(fd, xl);
+
+    UNLOCK(&fd->lock);
+
+    return ctx;
+}
+
+/* Round *offset down to a stripe boundary (and, when 'scale' is set,
+ * convert it to a per-fragment offset by dividing by the fragment count).
+ * Returns the number of head bytes removed by the rounding.  Assumes a
+ * non-negative offset. */
+size_t ec_adjust_offset(ec_t * ec, off_t * offset, int32_t scale)
+{
+    size_t head, tmp;
+
+    tmp = *offset;
+    head = tmp % ec->stripe_size;
+    tmp -= head;
+    if (scale)
+    {
+        tmp /= ec->fragments;
+    }
+
+    *offset = tmp;
+
+    return head;
+}
+
+/* Round 'size' up to a whole number of stripes (and, when 'scale' is set,
+ * convert it to a per-fragment size). */
+size_t ec_adjust_size(ec_t * ec, size_t size, int32_t scale)
+{
+    size += ec->stripe_size - 1;
+    size -= size % ec->stripe_size;
+    if (scale)
+    {
+        size /= ec->fragments;
+    }
+
+    return size;
+}
diff --git a/xlators/cluster/ec/src/ec-helpers.h b/xlators/cluster/ec/src/ec-helpers.h
new file mode 100644
index 00000000000..6625ade4b08
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-helpers.h
@@ -0,0 +1,59 @@
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __EC_HELPERS_H__
+#define __EC_HELPERS_H__
+
+#include "ec-data.h"
+
+const char * ec_bin(char * str, size_t size, uint64_t value, int32_t digits);
+const char * ec_fop_name(int32_t id);
+void ec_trace(const char * event, ec_fop_data_t * fop, const char * fmt, ...);
+uint64_t ec_itransform(ec_t * ec, int32_t idx, uint64_t offset);
+uint64_t ec_deitransform(ec_t * ec, int32_t * idx, uint64_t offset);
+int32_t ec_bits_count(uint64_t n);
+int32_t ec_bits_index(uint64_t n);
+int32_t ec_bits_consume(uint64_t * n);
+size_t ec_iov_copy_to(void * dst, struct iovec * vector, int32_t count,
+ off_t offset, size_t size);
+
+int32_t ec_dict_set_number(dict_t * dict, char * key, uint64_t value);
+int32_t ec_dict_del_number(dict_t * dict, char * key, uint64_t * value);
+
+int32_t ec_loc_parent(xlator_t * xl, loc_t * loc, loc_t * parent,
+ char ** name);
+int32_t ec_loc_prepare(xlator_t * xl, loc_t * loc, inode_t * inode,
+ struct iatt * iatt);
+
+int32_t ec_loc_from_fd(xlator_t * xl, loc_t * loc, fd_t * fd);
+int32_t ec_loc_from_loc(xlator_t * xl, loc_t * dst, loc_t * src);
+
+void ec_owner_set(call_frame_t * frame, void * owner);
+void ec_owner_copy(call_frame_t * frame, gf_lkowner_t * owner);
+
+ec_inode_t * __ec_inode_get(inode_t * inode, xlator_t * xl);
+ec_inode_t * ec_inode_get(inode_t * inode, xlator_t * xl);
+ec_fd_t * __ec_fd_get(fd_t * fd, xlator_t * xl);
+ec_fd_t * ec_fd_get(fd_t * fd, xlator_t * xl);
+
+size_t ec_adjust_offset(ec_t * ec, off_t * offset, int32_t scale);
+size_t ec_adjust_size(ec_t * ec, size_t size, int32_t scale);
+
+#endif /* __EC_HELPERS_H__ */
diff --git a/xlators/cluster/ec/src/ec-inode-read.c b/xlators/cluster/ec/src/ec-inode-read.c
new file mode 100644
index 00000000000..b1db9c9fbb7
--- /dev/null
+++ b/xlators/cluster/ec/src/ec-inode-read.c
@@ -0,0 +1,1764 @@
+
+/*
+ Copyright (c) 2012 DataLab, s.l. <http://www.datalab.es>
+
+ This file is part of the cluster/ec translator for GlusterFS.
+
+ The cluster/ec translator for GlusterFS is free software: you can
+ redistribute it and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software Foundation, either
+ version 3 of the License, or (at your option) any later version.
+
+ The cluster/ec translator for GlusterFS is distributed in the hope
+ that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the cluster/ec translator for GlusterFS. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include "xlator.h"
+#include "defaults.h"
+
+#include "ec-helpers.h"
+#include "ec-common.h"
+#include "ec-combine.h"
+#include "ec-method.h"
+#include "ec-fops.h"
+
+/* FOP: access */
+
+/* Callback of the access fop on one subvolume (index in 'cookie').  If
+ * ec_dispatch_one_retry() decides not to retry on another subvolume, the
+ * result is forwarded to the user callback. */
+int32_t ec_access_cbk(call_frame_t * frame, void * cookie, xlator_t * this,
+                      int32_t op_ret, int32_t op_errno, dict_t * xdata)
+{
+    ec_fop_data_t * fop = NULL;
+    int32_t idx = (int32_t)(uintptr_t)cookie;
+
+    VALIDATE_OR_GOTO(this, out);
+    GF_VALIDATE_OR_GOTO(this->name, frame, out);
+    GF_VALIDATE_OR_GOTO(this->name, frame->local, out);
+    GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+    fop = frame->local;
+
+    ec_trace("CBK", fop, "idx=%d, frame=%p, op_ret=%d, op_errno=%d", idx,
+             frame, op_ret, op_errno);
+
+    if (!ec_dispatch_one_retry(fop, idx, op_ret, op_errno))
+    {
+        if (fop->cbks.access != NULL)
+        {
+            fop->cbks.access(fop->req_frame, fop, this, op_ret, op_errno,
+                             xdata);
+        }
+    }
+
+out:
+    if (fop != NULL)
+    {
+        ec_complete(fop);
+    }
+
+    return 0;
+}
+
+/* Wind the access fop to subvolume 'idx' (fop->int32 carries the access
+ * mask). */
+void ec_wind_access(ec_t * ec, ec_fop_data_t * fop, int32_t idx)
+{
+    ec_trace("WIND", fop, "idx=%d", idx);
+
+    STACK_WIND_COOKIE(fop->frame, ec_access_cbk, (void *)(uintptr_t)idx,
+                      ec->xl_list[idx], ec->xl_list[idx]->fops->access,
+                      &fop->loc[0], fop->int32, fop->xdata);
+}
+
+/* State machine for access: dispatched to a single subvolume; on failure
+ * the error is reported through the user callback. */
+int32_t ec_manager_access(ec_fop_data_t * fop, int32_t state)
+{
+    switch (state)
+    {
+        case EC_STATE_INIT:
+        case EC_STATE_DISPATCH:
+            ec_dispatch_one(fop);
+
+            return EC_STATE_PREPARE_ANSWER;
+
+        case -EC_STATE_REPORT:
+            if (fop->cbks.access != NULL)
+            {
+                fop->cbks.access(fop->req_frame, fop, fop->xl, -1, fop->error,
+                                 NULL);
+            }
+
+        /* fallthrough */
+        case EC_STATE_REPORT:
+            return EC_STATE_END;
+
+        default:
+            gf_log(fop->xl->name, GF_LOG_ERROR, "Unhandled state %d for %s",
+                   state, ec_fop_name(fop->id));
+
+            return EC_STATE_END;
+    }
+}
+
+void ec_access(call_frame_t * frame, xlator_t * this, uintptr_t target,
+ int32_t minimum, fop_access_cbk_t func, void * data,
+ loc_t * loc, int32_t mask, dict_t * xdata)
+{
+ ec_cbk_t callback = { .access = func };
+ ec_fop_data_t * fop = NULL;
+ int32_t error = EIO;
+
+ gf_log("ec", GF_LOG_TRACE, "EC(ACCESS) %p", frame);
+
+ VALIDATE_OR_GOTO(this, out);
+ GF_VALIDATE_OR_GOTO(this->name, frame, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ fop = ec_fop_data_allocate(frame, this, GF_FOP_ACCESS, 0, target, minimum,
+ ec_wind_access, ec_manager_access, callback,
+ data);
+ if (fop == NULL)
+ {
+ goto out;
+ }
+
+ fop->int32 = mask;
+
+ if (loc != NULL)
+ {
+ if (loc_copy(&fop->loc[0], loc) != 0)
+ {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to copy a location.");
+
+ goto out;
+ }
+ }
+ if (xdata != NULL)
+ {
+ fop->xdata = dict_ref(xdata);
+ if (fop->xdata == NULL)
+ {
+ gf_log(this->name, GF