kumquat-buildroot/package/rocksdb/0002-POWER7-8-compatiblity-fix-CRC32-C-implementation.patch

From 7eab5c3bf8449673de7bf0d3446a669694f71f1a Mon Sep 17 00:00:00 2001
From: Daniel Black <daniel@linux.ibm.com>
Date: Thu, 16 Apr 2020 12:23:02 +1000
Subject: [PATCH] POWER7/8 compatibility fix + CRC32 C implementation
When running on a POWER7/8, the compiler will successfully
report being able to compile for POWER9 compatibility. It will
however fail to run due to POWER9+-only instructions being used.

To preserve the goal of optimizing for the local hardware,
-mcpu=native is used, but only if we don't already have
C{,XX}_FLAGS with cpu/arch/tune set. The PORTABLE flag that was
added for x86 is reused to ensure that the minimum viable CPU is
selected: POWER7 if compiling on big endian and POWER8 if on
little endian.

This ensures that if a binary distributor compiles on a POWER9
and aims to support POWER8 packages, then hopefully they will
have the correct C{,XX}_FLAGS and, as such, will not end up with
a broken package on POWER8.

Fixes: 8fc20ac468b266a53083175025375985ec04b796

We also replace the POWER crc32c assembly with a C implementation.
Clang compile failures on POWER caused by the missing ppc-asm.h
header prompted replacing the ASM CRC32 implementation with the C
implementation. It is included whenever the compile flags allow it
to be built; for example, on POWER7 the optimized crc32 is still
built, but it won't be used because of the runtime detection. If
the executable is then moved to a big-endian POWER8, it will work
correctly.

https://github.com/antonblanchard/crc32-vpmsum/blob/master/vec_crc32.c
is used with only a small include-path change, with the local
copyright header maintained.

util/crc32c.cc removes arch_ppc_crc32, which was only ever used in
a local context. Significant advice from tchaikov in #2869 is also
incorporated: all compile-time checks are replaced with runtime
checks.

Corrects getauxval detection from 8bbd76edbf by including the
header from the right directory.
[Retrieved (and slightly updated for 6.13.3) from:
https://github.com/facebook/rocksdb/pull/7079/commits/7eab5c3bf8449673de7bf0d3446a669694f71f1a]
Signed-off-by: Fabrice Fontaine <fontaine.fabrice@gmail.com>
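
As a quick illustration of the runtime detection this commit switches to,
the following standalone sketch mirrors the isAltiVec() check added to
util/crc32c.cc below. It assumes Linux with glibc's <sys/auxv.h> and is
illustrative only, not part of the patch:

#include <cstdio>

#if defined(__linux__)
#include <sys/auxv.h>
#endif

// Same fallback definitions the patch adds to util/crc32c.cc for old headers.
#ifndef PPC_FEATURE2_VEC_CRYPTO
#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
#endif
#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

// True only when the kernel reports the POWER8 vector-crypto facility
// (vpmsum*), i.e. when the accelerated CRC32C code is actually safe to run.
static bool HasPower8VectorCrypto() {
#if defined(__powerpc64__) && defined(__linux__)
  return (getauxval(AT_HWCAP2) & PPC_FEATURE2_VEC_CRYPTO) != 0;
#else
  return false;  // everything else uses the portable software CRC
#endif
}

int main() {
  std::printf("fast CRC32C path: %s\n",
              HasPower8VectorCrypto() ? "yes" : "no");
  return 0;
}
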
---
CMakeLists.txt | 59 +-
Makefile | 35 +-
src.mk | 11 +-
util/crc32c.cc | 63 +-
util/crc32c_ppc.c | 94 --
util/crc32c_ppc.cc | 662 +++++++++++++
util/crc32c_ppc.h | 19 -
util/crc32c_ppc_asm.S | 752 --------------
util/crc32c_ppc_clang_workaround.h | 85 ++
util/crc32c_ppc_constants.h | 1457 +++++++++++++++++-----------
util/crc32c_test.cc | 3 +
11 files changed, 1684 insertions(+), 1556 deletions(-)
delete mode 100644 util/crc32c_ppc.c
create mode 100644 util/crc32c_ppc.cc
delete mode 100644 util/crc32c_ppc.h
delete mode 100644 util/crc32c_ppc_asm.S
create mode 100644 util/crc32c_ppc_clang_workaround.h
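
For orientation before the diffs themselves: after this patch, util/crc32c.cc
picks its implementation through a function pointer at startup instead of
through #ifdef HAVE_POWER8. The sketch below approximates that dispatch shape;
names are close to but not identical to the real ones (the patch uses
ExtendPPCImpl, isAltiVec and Choose_Extend), linking the PPC branch requires
the object built from util/crc32c_ppc.cc with the POWER_CRC32_FLAGS, and the
compact bit-by-bit loop merely stands in for RocksDB's ExtendImpl<Slow_CRC32>:

#include <cstddef>
#include <cstdint>

#if defined(__linux__)
#include <sys/auxv.h>
#endif
#ifndef PPC_FEATURE2_VEC_CRYPTO
#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
#endif
#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

typedef uint32_t (*Function)(uint32_t, const char*, size_t);

#if defined(__powerpc64__)
// Provided by util/crc32c_ppc.cc, which is compiled separately with
// -mcrypto -mpower8-vector -maltivec -mvsx.
extern uint32_t crc32c_ppc(uint32_t crc, unsigned char const* buffer,
                           unsigned long len);

static uint32_t ExtendPPC(uint32_t crc, const char* buf, size_t size) {
  return crc32c_ppc(crc, reinterpret_cast<const unsigned char*>(buf), size);
}

static bool HasVecCrypto() {
#if defined(__linux__)
  return (getauxval(AT_HWCAP2) & PPC_FEATURE2_VEC_CRYPTO) != 0;
#else
  return false;
#endif
}
#endif  // __powerpc64__

// Stand-in for the portable path: bit-by-bit reflected CRC-32C.
static uint32_t ExtendSoftware(uint32_t crc, const char* buf, size_t size) {
  crc = ~crc;
  for (size_t i = 0; i < size; ++i) {
    crc ^= static_cast<unsigned char>(buf[i]);
    for (int k = 0; k < 8; ++k) {
      crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1u)));
    }
  }
  return ~crc;
}

// Decided once at startup: the optimized routine is compiled in, but only
// ever called when the running CPU really supports it.
static Function ChooseExtend() {
#if defined(__powerpc64__)
  if (HasVecCrypto()) {
    return ExtendPPC;
  }
#endif
  return ExtendSoftware;
}

static const Function Extend = ChooseExtend();
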
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 91647d83bb..5adf0cf3a5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -39,7 +39,7 @@ include(ReadVersion)
get_rocksdb_version(rocksdb_VERSION)
project(rocksdb
VERSION ${rocksdb_VERSION}
- LANGUAGES CXX C ASM)
+ LANGUAGES CXX C)
if(POLICY CMP0042)
cmake_policy(SET CMP0042 NEW)
@@ -223,26 +223,6 @@ else()
endif()
include(CheckCCompilerFlag)
-if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
- CHECK_C_COMPILER_FLAG("-mcpu=power9" HAS_POWER9)
- if(HAS_POWER9)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mcpu=power9 -mtune=power9")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power9 -mtune=power9")
- else()
- CHECK_C_COMPILER_FLAG("-mcpu=power8" HAS_POWER8)
- if(HAS_POWER8)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mcpu=power8 -mtune=power8")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power8 -mtune=power8")
- endif(HAS_POWER8)
- endif(HAS_POWER9)
- CHECK_C_COMPILER_FLAG("-maltivec" HAS_ALTIVEC)
- if(HAS_ALTIVEC)
- message(STATUS " HAS_ALTIVEC yes")
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maltivec")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maltivec")
- endif(HAS_ALTIVEC)
-endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
-
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64")
CHECK_C_COMPILER_FLAG("-march=armv8-a+crc+crypto" HAS_ARMV8_CRC)
if(HAS_ARMV8_CRC)
@@ -260,21 +240,40 @@ if(PORTABLE)
if(FORCE_SSE42 AND NOT MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2 -mpclmul")
endif()
+ if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
+ if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64le")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power8")
+ else()
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power7")
+ endif()
+ endif()
else()
if(MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2")
else()
- if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64" AND NOT HAS_ARMV8_CRC)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
+ if(NOT CMAKE_CXX_FLAGS MATCHES "m(cpu|tune|arch)")
+ if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
+ # Power doesn't have a march option - https://gcc.gnu.org/onlinedocs/gcc/RS_002f6000-and-PowerPC-Options.html
+ # -mcpu activates all available options
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=native")
+ else()
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
+ endif()
endif()
endif()
endif()
-include(CheckCXXSourceCompiles)
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
+ include(CheckCXXCompilerFlag)
+ SET(POWER_CRC32_FLAGS "-mcrypto -mpower8-vector -maltivec -mvsx")
+ CHECK_CXX_COMPILER_FLAG("${POWER_CRC32_FLAGS}" HAS_POWER_CRC32_FLAGS)
+endif()
+
if(NOT MSVC)
set(CMAKE_REQUIRED_FLAGS "-msse4.2 -mpclmul")
endif()
+include(CheckCXXSourceCompiles)
CHECK_CXX_SOURCE_COMPILES("
#include <cstdint>
#include <nmmintrin.h>
@@ -525,7 +524,7 @@ if(HAVE_SCHED_GETCPU)
add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT)
endif()
-check_cxx_symbol_exists(getauxval auvx.h HAVE_AUXV_GETAUXVAL)
+check_cxx_symbol_exists(getauxval sys/auxv.h HAVE_AUXV_GETAUXVAL)
if(HAVE_AUXV_GETAUXVAL)
add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
endif()
@@ -796,11 +795,13 @@ if(HAVE_SSE42 AND NOT MSVC)
PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul")
endif()
-if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
+if(HAS_POWER_CRC32_FLAGS)
list(APPEND SOURCES
- util/crc32c_ppc.c
- util/crc32c_ppc_asm.S)
-endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
+ util/crc32c_ppc.cc)
+ set_source_files_properties(
+ util/crc32c_ppc.cc
+ PROPERTIES COMPILE_FLAGS "${POWER_CRC32_FLAGS}")
+endif()
if(HAS_ARMV8_CRC)
list(APPEND SOURCES
diff --git a/Makefile b/Makefile
index a19d2ff964..7da8f71085 100644
--- a/Makefile
+++ b/Makefile
@@ -144,16 +144,9 @@ OPT += -momit-leaf-frame-pointer
endif
endif
-ifeq (,$(shell $(CXX) -fsyntax-only -maltivec -xc /dev/null 2>&1))
-CXXFLAGS += -DHAS_ALTIVEC
-CFLAGS += -DHAS_ALTIVEC
-HAS_ALTIVEC=1
-endif
-
-ifeq (,$(shell $(CXX) -fsyntax-only -mcpu=power8 -xc /dev/null 2>&1))
-CXXFLAGS += -DHAVE_POWER8
-CFLAGS += -DHAVE_POWER8
-HAVE_POWER8=1
+ifeq (,$(shell $(CXX) -fsyntax-only -mcrypto -mpower8-vector -maltivec -mvsx -xc /dev/null 2>&1))
+POWER_CRC32_FLAGS=-mcrypto -mpower8-vector -maltivec -mvsx
+HAVE_POWER_CRC32_FLAGS=1
endif
ifeq (,$(shell $(CXX) -fsyntax-only -march=armv8-a+crc+crypto -xc /dev/null 2>&1))
@@ -461,10 +454,6 @@ endif
OBJ_DIR?=.
LIB_OBJECTS = $(patsubst %.cc, $(OBJ_DIR)/%.o, $(LIB_SOURCES))
-ifeq ($(HAVE_POWER8),1)
-LIB_OBJECTS += $(patsubst %.c, $(OBJ_DIR)/%.o, $(LIB_SOURCES_C))
-LIB_OBJECTS += $(patsubst %.S, $(OBJ_DIR)/%.o, $(LIB_SOURCES_ASM))
-endif
ifeq ($(USE_FOLLY_DISTRIBUTED_MUTEX),1)
LIB_OBJECTS += $(patsubst %.cpp, $(OBJ_DIR)/%.o, $(FOLLY_SOURCES))
@@ -2254,13 +2242,12 @@ IOSVERSION=$(shell defaults read $(PLATFORMSROOT)/iPhoneOS.platform/version CFBu
lipo ios-x86/$@ ios-arm/$@ -create -output $@
else
-ifeq ($(HAVE_POWER8),1)
-$(OBJ_DIR)/util/crc32c_ppc.o: util/crc32c_ppc.c
- $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@
+ifeq ($(HAVE_POWER_CRC32_FLAGS),1)
+$(OBJ_DIR)/util/crc32c_ppc.o: util/crc32c_ppc.cc
+ $(AM_V_CC)$(CC) $(CFLAGS) $(POWER_CRC32_FLAGS) -c $< -o $@
-$(OBJ_DIR)/util/crc32c_ppc_asm.o: util/crc32c_ppc_asm.S
- $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@
endif
+
$(OBJ_DIR)/%.o: %.cc
$(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) -c $< -o $@ $(COVERAGEFLAGS)
diff --git a/src.mk b/src.mk
index c4fec346e4..0f6a19a17c 100644
--- a/src.mk
+++ b/src.mk
@@ -255,14 +255,9 @@ LIB_SOURCES +=\
util/crc32c_arm64.cc
endif
-ifeq (,$(shell $(CXX) -fsyntax-only -maltivec -xc /dev/null 2>&1))
-LIB_SOURCES_ASM =\
- util/crc32c_ppc_asm.S
-LIB_SOURCES_C = \
- util/crc32c_ppc.c
-else
-LIB_SOURCES_ASM =
-LIB_SOURCES_C =
+ifeq ($(HAVE_POWER_CRC32_FLAGS),1)
+LIB_SOURCES += \
+ util/crc32c_ppc.cc
endif
TOOL_LIB_SOURCES = \
diff --git a/util/crc32c.cc b/util/crc32c.cc
index a709e9b1ce..b24a4f0e0f 100644
--- a/util/crc32c.cc
+++ b/util/crc32c.cc
@@ -20,15 +20,13 @@
#include "util/crc32c_arm64.h"
-#ifdef __powerpc64__
-#include "util/crc32c_ppc.h"
-#include "util/crc32c_ppc_constants.h"
-
-#if __linux__
#ifdef ROCKSDB_AUXV_GETAUXVAL_PRESENT
#include <sys/auxv.h>
#endif
+#ifdef __powerpc64__
+extern uint32_t crc32c_ppc(uint32_t crc, unsigned char const* buffer,
+ unsigned long len);
#ifndef PPC_FEATURE2_VEC_CRYPTO
#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
#endif
@@ -37,19 +35,11 @@
#define AT_HWCAP2 26
#endif
-#endif /* __linux__ */
-
#endif
namespace ROCKSDB_NAMESPACE {
namespace crc32c {
-#if defined(HAVE_POWER8) && defined(HAS_ALTIVEC)
-#ifdef __powerpc64__
-static int arch_ppc_crc32 = 0;
-#endif /* __powerpc64__ */
-#endif
-
static const uint32_t table0_[256] = {
0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4,
0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb,
@@ -342,6 +332,7 @@ static inline void Slow_CRC32(uint64_t* l, uint8_t const **p) {
table0_[c >> 24];
}
+#if !defined(HAVE_ARM64_CRC) && !defined(__powerpc64__)
static inline void Fast_CRC32(uint64_t* l, uint8_t const **p) {
#ifndef HAVE_SSE42
Slow_CRC32(l, p);
@@ -355,6 +346,7 @@ static inline void Fast_CRC32(uint64_t* l, uint8_t const **p) {
*p += 4;
#endif
}
+#endif
template<void (*CRC32)(uint64_t*, uint8_t const**)>
uint32_t ExtendImpl(uint32_t crc, const char* buf, size_t size) {
@@ -400,10 +392,8 @@ uint32_t ExtendImpl(uint32_t crc, const char* buf, size_t size) {
return static_cast<uint32_t>(l ^ 0xffffffffu);
}
-// Detect if ARM64 CRC or not.
-#ifndef HAVE_ARM64_CRC
+#if !defined(HAVE_ARM64_CRC) && !defined(__powerpc64__)
// Detect if SS42 or not.
-#ifndef HAVE_POWER8
static bool isSSE42() {
#ifndef HAVE_SSE42
@@ -439,36 +429,22 @@ static bool isPCLMULQDQ() {
#endif
}
-#endif // HAVE_POWER8
-#endif // HAVE_ARM64_CRC
+#endif // !__powerpc64__ && !HAVE_ARM64_CRC
typedef uint32_t (*Function)(uint32_t, const char*, size_t);
-#if defined(HAVE_POWER8) && defined(HAS_ALTIVEC)
+#if defined(__powerpc64__)
uint32_t ExtendPPCImpl(uint32_t crc, const char *buf, size_t size) {
return crc32c_ppc(crc, (const unsigned char *)buf, size);
}
-#if __linux__
-static int arch_ppc_probe(void) {
- arch_ppc_crc32 = 0;
-
-#if defined(__powerpc64__) && defined(ROCKSDB_AUXV_GETAUXVAL_PRESENT)
- if (getauxval(AT_HWCAP2) & PPC_FEATURE2_VEC_CRYPTO) arch_ppc_crc32 = 1;
-#endif /* __powerpc64__ */
-
- return arch_ppc_crc32;
-}
-#endif // __linux__
-
static bool isAltiVec() {
- if (arch_ppc_probe()) {
- return true;
- } else {
- return false;
- }
-}
+#if defined(__linux__) && defined(ROCKSDB_AUXV_GETAUXVAL_PRESENT)
+ if (getauxval(AT_HWCAP2) & PPC_FEATURE2_VEC_CRYPTO) return true;
#endif
+ return false;
+}
+#endif // __powerpc64__
#if defined(__linux__) && defined(HAVE_ARM64_CRC)
uint32_t ExtendARMImpl(uint32_t crc, const char *buf, size_t size) {
@@ -480,16 +456,9 @@ std::string IsFastCrc32Supported() {
bool has_fast_crc = false;
std::string fast_zero_msg;
std::string arch;
-#ifdef HAVE_POWER8
-#ifdef HAS_ALTIVEC
- if (arch_ppc_probe()) {
- has_fast_crc = true;
- arch = "PPC";
- }
-#else
- has_fast_crc = false;
+#ifdef __powerpc64__
+ has_fast_crc = isAltiVec();
arch = "PPC";
-#endif
#elif defined(__linux__) && defined(HAVE_ARM64_CRC)
if (crc32c_runtime_check()) {
has_fast_crc = true;
@@ -1220,7 +1189,7 @@ uint32_t crc32c_3way(uint32_t crc, const char* buf, size_t len) {
#endif //HAVE_SSE42 && HAVE_PCLMUL
static inline Function Choose_Extend() {
-#ifdef HAVE_POWER8
+#ifdef __powerpc64__
return isAltiVec() ? ExtendPPCImpl : ExtendImpl<Slow_CRC32>;
#elif defined(__linux__) && defined(HAVE_ARM64_CRC)
if(crc32c_runtime_check()) {
diff --git a/util/crc32c_ppc.c b/util/crc32c_ppc.c
deleted file mode 100644
index 888a4943ea..0000000000
--- a/util/crc32c_ppc.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-// Copyright (c) 2017 International Business Machines Corp.
-// All rights reserved.
-// This source code is licensed under both the GPLv2 (found in the
-// COPYING file in the root directory) and Apache 2.0 License
-// (found in the LICENSE.Apache file in the root directory).
-
-#define CRC_TABLE
-#include <stdint.h>
-#include <stdlib.h>
-#include <strings.h>
-#include "util/crc32c_ppc_constants.h"
-
-#define VMX_ALIGN 16
-#define VMX_ALIGN_MASK (VMX_ALIGN - 1)
-
-#ifdef REFLECT
-static unsigned int crc32_align(unsigned int crc, unsigned char const *p,
- unsigned long len) {
- while (len--) crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
- return crc;
-}
-#endif
-
-#ifdef HAVE_POWER8
-unsigned int __crc32_vpmsum(unsigned int crc, unsigned char const *p,
- unsigned long len);
-
-static uint32_t crc32_vpmsum(uint32_t crc, unsigned char const *data,
- unsigned len) {
- unsigned int prealign;
- unsigned int tail;
-
-#ifdef CRC_XOR
- crc ^= 0xffffffff;
-#endif
-
- if (len < VMX_ALIGN + VMX_ALIGN_MASK) {
- crc = crc32_align(crc, data, (unsigned long)len);
- goto out;
- }
-
- if ((unsigned long)data & VMX_ALIGN_MASK) {
- prealign = VMX_ALIGN - ((unsigned long)data & VMX_ALIGN_MASK);
- crc = crc32_align(crc, data, prealign);
- len -= prealign;
- data += prealign;
- }
-
- crc = __crc32_vpmsum(crc, data, (unsigned long)len & ~VMX_ALIGN_MASK);
-
- tail = len & VMX_ALIGN_MASK;
- if (tail) {
- data += len & ~VMX_ALIGN_MASK;
- crc = crc32_align(crc, data, tail);
- }
-
-out:
-#ifdef CRC_XOR
- crc ^= 0xffffffff;
-#endif
-
- return crc;
-}
-
-/* This wrapper function works around the fact that crc32_vpmsum
- * does not gracefully handle the case where the data pointer is NULL. There
- * may be room for performance improvement here.
- */
-uint32_t crc32c_ppc(uint32_t crc, unsigned char const *data, unsigned len) {
- unsigned char *buf2;
-
- if (!data) {
- buf2 = (unsigned char *)malloc(len);
- bzero(buf2, len);
- crc = crc32_vpmsum(crc, buf2, len);
- free(buf2);
- } else {
- crc = crc32_vpmsum(crc, data, (unsigned long)len);
- }
- return crc;
-}
-
-#else /* HAVE_POWER8 */
-
-/* This symbol has to exist on non-ppc architectures (and on legacy
- * ppc systems using power7 or below) in order to compile properly
- * there, even though it won't be called.
- */
-uint32_t crc32c_ppc(uint32_t crc, unsigned char const *data, unsigned len) {
- return 0;
-}
-
-#endif /* HAVE_POWER8 */
diff --git a/util/crc32c_ppc.cc b/util/crc32c_ppc.cc
new file mode 100644
index 0000000000..44487eb80b
--- /dev/null
+++ b/util/crc32c_ppc.cc
@@ -0,0 +1,662 @@
+/*
+ * Calculate the checksum of data that is 16 byte aligned and a multiple of
+ * 16 bytes.
+ *
+ * The first step is to reduce it to 1024 bits. We do this in 8 parallel
+ * chunks in order to mask the latency of the vpmsum instructions. If we
+ * have more than 32 kB of data to checksum we repeat this step multiple
+ * times, passing in the previous 1024 bits.
+ *
+ * The next step is to reduce the 1024 bits to 64 bits. This step adds
+ * 32 bits of 0s to the end - this matches what a CRC does. We just
+ * calculate constants that land the data in this 32 bits.
+ *
+ * We then use fixed point Barrett reduction to compute a mod n over GF(2)
+ * for n = CRC using POWER8 instructions. We use x = 32.
+ *
+ * http://en.wikipedia.org/wiki/Barrett_reduction
+ *
+ * This code uses gcc vector builtins instead of using assembly directly.
+ *
+ * Copyright (C) 2017 Rogerio Alves <rogealve@br.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of either:
+ *
+ * a) the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option)
+ * any later version, or
+ * b) the Apache License, Version 2.0
+ */
+
+#include <altivec.h>
+
+#define POWER8_INTRINSICS
+#define CRC_TABLE
+
+#include "crc32c_ppc_constants.h"
+
+#define VMX_ALIGN 16
+#define VMX_ALIGN_MASK (VMX_ALIGN - 1)
+
+#ifdef REFLECT
+static unsigned int crc32_align(unsigned int crc, const unsigned char *p,
+ unsigned long len) {
+ while (len--) crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
+ return crc;
+}
+#else
+static unsigned int crc32_align(unsigned int crc, const unsigned char *p,
+ unsigned long len) {
+ while (len--) crc = crc_table[((crc >> 24) ^ *p++) & 0xff] ^ (crc << 8);
+ return crc;
+}
+#endif
+
+static unsigned int __attribute__((aligned(32)))
+__crc32_vpmsum(unsigned int crc, const void *p, unsigned long len);
+
+#ifndef CRC32_FUNCTION
+#define CRC32_FUNCTION crc32c_ppc
+#endif
+
+unsigned int CRC32_FUNCTION(unsigned int crc, const unsigned char *p,
+ unsigned long len) {
+ unsigned int prealign;
+ unsigned int tail;
+
+#ifdef CRC_XOR
+ crc ^= 0xffffffff;
+#endif
+
+ if (len < VMX_ALIGN + VMX_ALIGN_MASK) {
+ crc = crc32_align(crc, p, len);
+ goto out;
+ }
+
+ if ((unsigned long)p & VMX_ALIGN_MASK) {
+ prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
+ crc = crc32_align(crc, p, prealign);
+ len -= prealign;
+ p += prealign;
+ }
+
+ crc = __crc32_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
+
+ tail = len & VMX_ALIGN_MASK;
+ if (tail) {
+ p += len & ~VMX_ALIGN_MASK;
+ crc = crc32_align(crc, p, tail);
+ }
+
+out:
+#ifdef CRC_XOR
+ crc ^= 0xffffffff;
+#endif
+
+ return crc;
+}
+
+#if defined(__clang__)
+#include "crc32c_ppc_clang_workaround.h"
+#else
+#define __builtin_pack_vector(a, b) __builtin_pack_vector_int128((a), (b))
+#define __builtin_unpack_vector_0(a) \
+ __builtin_unpack_vector_int128((vector __int128_t)(a), 0)
+#define __builtin_unpack_vector_1(a) \
+ __builtin_unpack_vector_int128((vector __int128_t)(a), 1)
+#endif
+
+/* When we have a load-store in a single-dispatch group and address overlap
+ * such that foward is not allowed (load-hit-store) the group must be flushed.
+ * A group ending NOP prevents the flush.
+ */
+#define GROUP_ENDING_NOP asm("ori 2,2,0" ::: "memory")
+
+#if defined(__BIG_ENDIAN__) && defined(REFLECT)
+#define BYTESWAP_DATA
+#elif defined(__LITTLE_ENDIAN__) && !defined(REFLECT)
+#define BYTESWAP_DATA
+#endif
+
+#ifdef BYTESWAP_DATA
+#define VEC_PERM(vr, va, vb, vc) \
+ vr = vec_perm(va, vb, (__vector unsigned char)vc)
+#if defined(__LITTLE_ENDIAN__)
+/* Byte reverse permute constant LE. */
+static const __vector unsigned long long vperm_const
+ __attribute__((aligned(16))) = {0x08090A0B0C0D0E0FUL, 0x0001020304050607UL};
+#else
+static const __vector unsigned long long vperm_const
+ __attribute__((aligned(16))) = {0x0F0E0D0C0B0A0908UL, 0X0706050403020100UL};
+#endif
+#else
+#define VEC_PERM(vr, va, vb, vc)
+#endif
+
+static unsigned int __attribute__((aligned(32)))
+__crc32_vpmsum(unsigned int crc, const void *p, unsigned long len) {
+ const __vector unsigned long long vzero = {0, 0};
+ const __vector unsigned long long vones = {0xffffffffffffffffUL,
+ 0xffffffffffffffffUL};
+
+#ifdef REFLECT
+ const __vector unsigned long long vmask_32bit =
+ (__vector unsigned long long)vec_sld((__vector unsigned char)vzero,
+ (__vector unsigned char)vones, 4);
+#endif
+
+ const __vector unsigned long long vmask_64bit =
+ (__vector unsigned long long)vec_sld((__vector unsigned char)vzero,
+ (__vector unsigned char)vones, 8);
+
+ __vector unsigned long long vcrc;
+
+ __vector unsigned long long vconst1, vconst2;
+
+ /* vdata0-vdata7 will contain our data (p). */
+ __vector unsigned long long vdata0, vdata1, vdata2, vdata3, vdata4, vdata5,
+ vdata6, vdata7;
+
+ /* v0-v7 will contain our checksums */
+ __vector unsigned long long v0 = {0, 0};
+ __vector unsigned long long v1 = {0, 0};
+ __vector unsigned long long v2 = {0, 0};
+ __vector unsigned long long v3 = {0, 0};
+ __vector unsigned long long v4 = {0, 0};
+ __vector unsigned long long v5 = {0, 0};
+ __vector unsigned long long v6 = {0, 0};
+ __vector unsigned long long v7 = {0, 0};
+
+ /* Vector auxiliary variables. */
+ __vector unsigned long long va0, va1, va2, va3, va4, va5, va6, va7;
+
+ unsigned int result = 0;
+ unsigned int offset; /* Constant table offset. */
+
+ unsigned long i; /* Counter. */
+ unsigned long chunks;
+
+ unsigned long block_size;
+ int next_block = 0;
+
+ /* Align by 128 bits. The last 128 bit block will be processed at end. */
+ unsigned long length = len & 0xFFFFFFFFFFFFFF80UL;
+
+#ifdef REFLECT
+ vcrc = (__vector unsigned long long)__builtin_pack_vector(0UL, crc);
+#else
+ vcrc = (__vector unsigned long long)__builtin_pack_vector(crc, 0UL);
+
+ /* Shift into top 32 bits */
+ vcrc = (__vector unsigned long long)vec_sld((__vector unsigned char)vcrc,
+ (__vector unsigned char)vzero, 4);
+#endif
+
+ /* Short version. */
+ if (len < 256) {
+ /* Calculate where in the constant table we need to start. */
+ offset = 256 - len;
+
+ vconst1 = vec_ld(offset, vcrc_short_const);
+ vdata0 = vec_ld(0, (__vector unsigned long long *)p);
+ VEC_PERM(vdata0, vdata0, vconst1, vperm_const);
+
+ /* xor initial value*/
+ vdata0 = vec_xor(vdata0, vcrc);
+
+ vdata0 = (__vector unsigned long long)__builtin_crypto_vpmsumw(
+ (__vector unsigned int)vdata0, (__vector unsigned int)vconst1);
+ v0 = vec_xor(v0, vdata0);
+
+ for (i = 16; i < len; i += 16) {
+ vconst1 = vec_ld(offset + i, vcrc_short_const);
+ vdata0 = vec_ld(i, (__vector unsigned long long *)p);
+ VEC_PERM(vdata0, vdata0, vconst1, vperm_const);
+ vdata0 = (__vector unsigned long long)__builtin_crypto_vpmsumw(
+ (__vector unsigned int)vdata0, (__vector unsigned int)vconst1);
+ v0 = vec_xor(v0, vdata0);
+ }
+ } else {
+ /* Load initial values. */
+ vdata0 = vec_ld(0, (__vector unsigned long long *)p);
+ vdata1 = vec_ld(16, (__vector unsigned long long *)p);
+
+ VEC_PERM(vdata0, vdata0, vdata0, vperm_const);
+ VEC_PERM(vdata1, vdata1, vdata1, vperm_const);
+
+ vdata2 = vec_ld(32, (__vector unsigned long long *)p);
+ vdata3 = vec_ld(48, (__vector unsigned long long *)p);
+
+ VEC_PERM(vdata2, vdata2, vdata2, vperm_const);
+ VEC_PERM(vdata3, vdata3, vdata3, vperm_const);
+
+ vdata4 = vec_ld(64, (__vector unsigned long long *)p);
+ vdata5 = vec_ld(80, (__vector unsigned long long *)p);
+
+ VEC_PERM(vdata4, vdata4, vdata4, vperm_const);
+ VEC_PERM(vdata5, vdata5, vdata5, vperm_const);
+
+ vdata6 = vec_ld(96, (__vector unsigned long long *)p);
+ vdata7 = vec_ld(112, (__vector unsigned long long *)p);
+
+ VEC_PERM(vdata6, vdata6, vdata6, vperm_const);
+ VEC_PERM(vdata7, vdata7, vdata7, vperm_const);
+
+ /* xor in initial value */
+ vdata0 = vec_xor(vdata0, vcrc);
+
+ p = (char *)p + 128;
+
+ do {
+ /* Checksum in blocks of MAX_SIZE. */
+ block_size = length;
+ if (block_size > MAX_SIZE) {
+ block_size = MAX_SIZE;
+ }
+
+ length = length - block_size;
+
+ /*
+ * Work out the offset into the constants table to start at. Each
+ * constant is 16 bytes, and it is used against 128 bytes of input
+ * data - 128 / 16 = 8
+ */
+ offset = (MAX_SIZE / 8) - (block_size / 8);
+ /* We reduce our final 128 bytes in a separate step */
+ chunks = (block_size / 128) - 1;
+
+ vconst1 = vec_ld(offset, vcrc_const);
+
+ va0 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata0,
+ (__vector unsigned long long)vconst1);
+ va1 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata1,
+ (__vector unsigned long long)vconst1);
+ va2 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata2,
+ (__vector unsigned long long)vconst1);
+ va3 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata3,
+ (__vector unsigned long long)vconst1);
+ va4 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata4,
+ (__vector unsigned long long)vconst1);
+ va5 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata5,
+ (__vector unsigned long long)vconst1);
+ va6 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata6,
+ (__vector unsigned long long)vconst1);
+ va7 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata7,
+ (__vector unsigned long long)vconst1);
+
+ if (chunks > 1) {
+ offset += 16;
+ vconst2 = vec_ld(offset, vcrc_const);
+ GROUP_ENDING_NOP;
+
+ vdata0 = vec_ld(0, (__vector unsigned long long *)p);
+ VEC_PERM(vdata0, vdata0, vdata0, vperm_const);
+
+ vdata1 = vec_ld(16, (__vector unsigned long long *)p);
+ VEC_PERM(vdata1, vdata1, vdata1, vperm_const);
+
+ vdata2 = vec_ld(32, (__vector unsigned long long *)p);
+ VEC_PERM(vdata2, vdata2, vdata2, vperm_const);
+
+ vdata3 = vec_ld(48, (__vector unsigned long long *)p);
+ VEC_PERM(vdata3, vdata3, vdata3, vperm_const);
+
+ vdata4 = vec_ld(64, (__vector unsigned long long *)p);
+ VEC_PERM(vdata4, vdata4, vdata4, vperm_const);
+
+ vdata5 = vec_ld(80, (__vector unsigned long long *)p);
+ VEC_PERM(vdata5, vdata5, vdata5, vperm_const);
+
+ vdata6 = vec_ld(96, (__vector unsigned long long *)p);
+ VEC_PERM(vdata6, vdata6, vdata6, vperm_const);
+
+ vdata7 = vec_ld(112, (__vector unsigned long long *)p);
+ VEC_PERM(vdata7, vdata7, vdata7, vperm_const);
+
+ p = (char *)p + 128;
+
+ /*
+ * main loop. We modulo schedule it such that it takes three
+ * iterations to complete - first iteration load, second
+ * iteration vpmsum, third iteration xor.
+ */
+ for (i = 0; i < chunks - 2; i++) {
+ vconst1 = vec_ld(offset, vcrc_const);
+ offset += 16;
+ GROUP_ENDING_NOP;
+
+ v0 = vec_xor(v0, va0);
+ va0 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata0,
+ (__vector unsigned long long)vconst2);
+ vdata0 = vec_ld(0, (__vector unsigned long long *)p);
+ VEC_PERM(vdata0, vdata0, vdata0, vperm_const);
+ GROUP_ENDING_NOP;
+
+ v1 = vec_xor(v1, va1);
+ va1 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata1,
+ (__vector unsigned long long)vconst2);
+ vdata1 = vec_ld(16, (__vector unsigned long long *)p);
+ VEC_PERM(vdata1, vdata1, vdata1, vperm_const);
+ GROUP_ENDING_NOP;
+
+ v2 = vec_xor(v2, va2);
+ va2 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata2,
+ (__vector unsigned long long)vconst2);
+ vdata2 = vec_ld(32, (__vector unsigned long long *)p);
+ VEC_PERM(vdata2, vdata2, vdata2, vperm_const);
+ GROUP_ENDING_NOP;
+
+ v3 = vec_xor(v3, va3);
+ va3 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata3,
+ (__vector unsigned long long)vconst2);
+ vdata3 = vec_ld(48, (__vector unsigned long long *)p);
+ VEC_PERM(vdata3, vdata3, vdata3, vperm_const);
+
+ vconst2 = vec_ld(offset, vcrc_const);
+ GROUP_ENDING_NOP;
+
+ v4 = vec_xor(v4, va4);
+ va4 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata4,
+ (__vector unsigned long long)vconst1);
+ vdata4 = vec_ld(64, (__vector unsigned long long *)p);
+ VEC_PERM(vdata4, vdata4, vdata4, vperm_const);
+ GROUP_ENDING_NOP;
+
+ v5 = vec_xor(v5, va5);
+ va5 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata5,
+ (__vector unsigned long long)vconst1);
+ vdata5 = vec_ld(80, (__vector unsigned long long *)p);
+ VEC_PERM(vdata5, vdata5, vdata5, vperm_const);
+ GROUP_ENDING_NOP;
+
+ v6 = vec_xor(v6, va6);
+ va6 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata6,
+ (__vector unsigned long long)vconst1);
+ vdata6 = vec_ld(96, (__vector unsigned long long *)p);
+ VEC_PERM(vdata6, vdata6, vdata6, vperm_const);
+ GROUP_ENDING_NOP;
+
+ v7 = vec_xor(v7, va7);
+ va7 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata7,
+ (__vector unsigned long long)vconst1);
+ vdata7 = vec_ld(112, (__vector unsigned long long *)p);
+ VEC_PERM(vdata7, vdata7, vdata7, vperm_const);
+
+ p = (char *)p + 128;
+ }
+
+ /* First cool down*/
+ vconst1 = vec_ld(offset, vcrc_const);
+ offset += 16;
+
+ v0 = vec_xor(v0, va0);
+ va0 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata0,
+ (__vector unsigned long long)vconst1);
+ GROUP_ENDING_NOP;
+
+ v1 = vec_xor(v1, va1);
+ va1 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata1,
+ (__vector unsigned long long)vconst1);
+ GROUP_ENDING_NOP;
+
+ v2 = vec_xor(v2, va2);
+ va2 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata2,
+ (__vector unsigned long long)vconst1);
+ GROUP_ENDING_NOP;
+
+ v3 = vec_xor(v3, va3);
+ va3 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata3,
+ (__vector unsigned long long)vconst1);
+ GROUP_ENDING_NOP;
+
+ v4 = vec_xor(v4, va4);
+ va4 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata4,
+ (__vector unsigned long long)vconst1);
+ GROUP_ENDING_NOP;
+
+ v5 = vec_xor(v5, va5);
+ va5 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata5,
+ (__vector unsigned long long)vconst1);
+ GROUP_ENDING_NOP;
+
+ v6 = vec_xor(v6, va6);
+ va6 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata6,
+ (__vector unsigned long long)vconst1);
+ GROUP_ENDING_NOP;
+
+ v7 = vec_xor(v7, va7);
+ va7 = __builtin_crypto_vpmsumd((__vector unsigned long long)vdata7,
+ (__vector unsigned long long)vconst1);
+ } /* else */
+
+ /* Second cool down. */
+ v0 = vec_xor(v0, va0);
+ v1 = vec_xor(v1, va1);
+ v2 = vec_xor(v2, va2);
+ v3 = vec_xor(v3, va3);
+ v4 = vec_xor(v4, va4);
+ v5 = vec_xor(v5, va5);
+ v6 = vec_xor(v6, va6);
+ v7 = vec_xor(v7, va7);
+
+#ifdef REFLECT
+ /*
+ * vpmsumd produces a 96 bit result in the least significant bits
+ * of the register. Since we are bit reflected we have to shift it
+ * left 32 bits so it occupies the least significant bits in the
+ * bit reflected domain.
+ */
+ v0 = (__vector unsigned long long)vec_sld(
+ (__vector unsigned char)v0, (__vector unsigned char)vzero, 4);
+ v1 = (__vector unsigned long long)vec_sld(
+ (__vector unsigned char)v1, (__vector unsigned char)vzero, 4);
+ v2 = (__vector unsigned long long)vec_sld(
+ (__vector unsigned char)v2, (__vector unsigned char)vzero, 4);
+ v3 = (__vector unsigned long long)vec_sld(
+ (__vector unsigned char)v3, (__vector unsigned char)vzero, 4);
+ v4 = (__vector unsigned long long)vec_sld(
+ (__vector unsigned char)v4, (__vector unsigned char)vzero, 4);
+ v5 = (__vector unsigned long long)vec_sld(
+ (__vector unsigned char)v5, (__vector unsigned char)vzero, 4);
+ v6 = (__vector unsigned long long)vec_sld(
+ (__vector unsigned char)v6, (__vector unsigned char)vzero, 4);
+ v7 = (__vector unsigned long long)vec_sld(
+ (__vector unsigned char)v7, (__vector unsigned char)vzero, 4);
+#endif
+
+ /* xor with the last 1024 bits. */
+ va0 = vec_ld(0, (__vector unsigned long long *)p);
+ VEC_PERM(va0, va0, va0, vperm_const);
+
+ va1 = vec_ld(16, (__vector unsigned long long *)p);
+ VEC_PERM(va1, va1, va1, vperm_const);
+
+ va2 = vec_ld(32, (__vector unsigned long long *)p);
+ VEC_PERM(va2, va2, va2, vperm_const);
+
+ va3 = vec_ld(48, (__vector unsigned long long *)p);
+ VEC_PERM(va3, va3, va3, vperm_const);
+
+ va4 = vec_ld(64, (__vector unsigned long long *)p);
+ VEC_PERM(va4, va4, va4, vperm_const);
+
+ va5 = vec_ld(80, (__vector unsigned long long *)p);
+ VEC_PERM(va5, va5, va5, vperm_const);
+
+ va6 = vec_ld(96, (__vector unsigned long long *)p);
+ VEC_PERM(va6, va6, va6, vperm_const);
+
+ va7 = vec_ld(112, (__vector unsigned long long *)p);
+ VEC_PERM(va7, va7, va7, vperm_const);
+
+ p = (char *)p + 128;
+
+ vdata0 = vec_xor(v0, va0);
+ vdata1 = vec_xor(v1, va1);
+ vdata2 = vec_xor(v2, va2);
+ vdata3 = vec_xor(v3, va3);
+ vdata4 = vec_xor(v4, va4);
+ vdata5 = vec_xor(v5, va5);
+ vdata6 = vec_xor(v6, va6);
+ vdata7 = vec_xor(v7, va7);
+
+ /* Check if we have more blocks to process */
+ next_block = 0;
+ if (length != 0) {
+ next_block = 1;
+
+ /* zero v0-v7 */
+ v0 = vec_xor(v0, v0);
+ v1 = vec_xor(v1, v1);
+ v2 = vec_xor(v2, v2);
+ v3 = vec_xor(v3, v3);
+ v4 = vec_xor(v4, v4);
+ v5 = vec_xor(v5, v5);
+ v6 = vec_xor(v6, v6);
+ v7 = vec_xor(v7, v7);
+ }
+ length = length + 128;
+
+ } while (next_block);
+
+ /* Calculate how many bytes we have left. */
+ length = (len & 127);
+
+ /* Calculate where in (short) constant table we need to start. */
+ offset = 128 - length;
+
+ v0 = vec_ld(offset, vcrc_short_const);
+ v1 = vec_ld(offset + 16, vcrc_short_const);
+ v2 = vec_ld(offset + 32, vcrc_short_const);
+ v3 = vec_ld(offset + 48, vcrc_short_const);
+ v4 = vec_ld(offset + 64, vcrc_short_const);
+ v5 = vec_ld(offset + 80, vcrc_short_const);
+ v6 = vec_ld(offset + 96, vcrc_short_const);
+ v7 = vec_ld(offset + 112, vcrc_short_const);
+
+ offset += 128;
+
+ v0 = (__vector unsigned long long)__builtin_crypto_vpmsumw(
+ (__vector unsigned int)vdata0, (__vector unsigned int)v0);
+ v1 = (__vector unsigned long long)__builtin_crypto_vpmsumw(
+ (__vector unsigned int)vdata1, (__vector unsigned int)v1);
+ v2 = (__vector unsigned long long)__builtin_crypto_vpmsumw(
+ (__vector unsigned int)vdata2, (__vector unsigned int)v2);
+ v3 = (__vector unsigned long long)__builtin_crypto_vpmsumw(
+ (__vector unsigned int)vdata3, (__vector unsigned int)v3);
+ v4 = (__vector unsigned long long)__builtin_crypto_vpmsumw(
+ (__vector unsigned int)vdata4, (__vector unsigned int)v4);
+ v5 = (__vector unsigned long long)__builtin_crypto_vpmsumw(
+ (__vector unsigned int)vdata5, (__vector unsigned int)v5);
+ v6 = (__vector unsigned long long)__builtin_crypto_vpmsumw(
+ (__vector unsigned int)vdata6, (__vector unsigned int)v6);
+ v7 = (__vector unsigned long long)__builtin_crypto_vpmsumw(
+ (__vector unsigned int)vdata7, (__vector unsigned int)v7);
+
+ /* Now reduce the tail (0-112 bytes). */
+ for (i = 0; i < length; i += 16) {
+ vdata0 = vec_ld(i, (__vector unsigned long long *)p);
+ VEC_PERM(vdata0, vdata0, vdata0, vperm_const);
+ va0 = vec_ld(offset + i, vcrc_short_const);
+ va0 = (__vector unsigned long long)__builtin_crypto_vpmsumw(
+ (__vector unsigned int)vdata0, (__vector unsigned int)va0);
+ v0 = vec_xor(v0, va0);
+ }
+
+ /* xor all parallel chunks together. */
+ v0 = vec_xor(v0, v1);
+ v2 = vec_xor(v2, v3);
+ v4 = vec_xor(v4, v5);
+ v6 = vec_xor(v6, v7);
+
+ v0 = vec_xor(v0, v2);
+ v4 = vec_xor(v4, v6);
+
+ v0 = vec_xor(v0, v4);
+ }
+
+ /* Barrett Reduction */
+ vconst1 = vec_ld(0, v_Barrett_const);
+ vconst2 = vec_ld(16, v_Barrett_const);
+
+ v1 = (__vector unsigned long long)vec_sld((__vector unsigned char)v0,
+ (__vector unsigned char)v0, 8);
+ v0 = vec_xor(v1, v0);
+
+#ifdef REFLECT
+ /* shift left one bit */
+ __vector unsigned char vsht_splat = vec_splat_u8(1);
+ v0 = (__vector unsigned long long)vec_sll((__vector unsigned char)v0,
+ vsht_splat);
+#endif
+
+ v0 = vec_and(v0, vmask_64bit);
+
+#ifndef REFLECT
+
+ /*
+ * Now for the actual algorithm. The idea is to calculate q,
+ * the multiple of our polynomial that we need to subtract. By
+ * doing the computation 2x bits higher (ie 64 bits) and shifting the
+ * result back down 2x bits, we round down to the nearest multiple.
+ */
+
+ /* ma */
+ v1 = __builtin_crypto_vpmsumd((__vector unsigned long long)v0,
+ (__vector unsigned long long)vconst1);
+ /* q = floor(ma/(2^64)) */
+ v1 = (__vector unsigned long long)vec_sld((__vector unsigned char)vzero,
+ (__vector unsigned char)v1, 8);
+ /* qn */
+ v1 = __builtin_crypto_vpmsumd((__vector unsigned long long)v1,
+ (__vector unsigned long long)vconst2);
+ /* a - qn, subtraction is xor in GF(2) */
+ v0 = vec_xor(v0, v1);
+ /*
+ * Get the result into r3. We need to shift it left 8 bytes:
+ * V0 [ 0 1 2 X ]
+ * V0 [ 0 X 2 3 ]
+ */
+ result = __builtin_unpack_vector_1(v0);
+#else
+
+ /*
+ * The reflected version of Barrett reduction. Instead of bit
+ * reflecting our data (which is expensive to do), we bit reflect our
+ * constants and our algorithm, which means the intermediate data in
+ * our vector registers goes from 0-63 instead of 63-0. We can reflect
+ * the algorithm because we don't carry in mod 2 arithmetic.
+ */
+
+ /* bottom 32 bits of a */
+ v1 = vec_and(v0, vmask_32bit);
+
+ /* ma */
+ v1 = __builtin_crypto_vpmsumd((__vector unsigned long long)v1,
+ (__vector unsigned long long)vconst1);
+
+ /* bottom 32bits of ma */
+ v1 = vec_and(v1, vmask_32bit);
+ /* qn */
+ v1 = __builtin_crypto_vpmsumd((__vector unsigned long long)v1,
+ (__vector unsigned long long)vconst2);
+ /* a - qn, subtraction is xor in GF(2) */
+ v0 = vec_xor(v0, v1);
+
+ /*
+ * Since we are bit reflected, the result (ie the low 32 bits) is in
+ * the high 32 bits. We just need to shift it left 4 bytes
+ * V0 [ 0 1 X 3 ]
+ * V0 [ 0 X 2 3 ]
+ */
+
+ /* shift result into top 64 bits of */
+ v0 = (__vector unsigned long long)vec_sld((__vector unsigned char)v0,
+ (__vector unsigned char)vzero, 4);
+
+ result = __builtin_unpack_vector_0(v0);
+#endif
+
+ return result;
+}
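
The routine above only feeds 16-byte-aligned, 16-byte-multiple data to the
vector code; buffers shorter than 31 bytes, and the up-to-15-byte prealign and
tail chunks of longer buffers, go through the table-driven crc32_align() loop
instead. A self-contained reference for that inner update (the REFLECT branch)
is sketched below; note that the patch ships a precomputed crc_table in
util/crc32c_ppc_constants.h, whereas this sketch derives an equivalent table at
runtime from the bit-reversed Castagnoli polynomial 0x82F63B78, and the
^0xffffffff pre/post inversions live one level up in crc32c_ppc():

#include <cstddef>
#include <cstdint>

// One entry of the equivalent of crc_table[] from crc32c_ppc_constants.h,
// computed from the bit-reversed CRC-32C polynomial.
static uint32_t Crc32cTableEntry(uint32_t i) {
  uint32_t c = i;
  for (int k = 0; k < 8; ++k) {
    c = (c & 1u) ? (c >> 1) ^ 0x82F63B78u : (c >> 1);
  }
  return c;
}

// Byte-at-a-time update with the same shape as crc32_align() under REFLECT:
//   crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
static uint32_t Crc32cAlign(uint32_t crc, const unsigned char* p, size_t len) {
  static uint32_t table[256];
  static bool table_ready = false;
  if (!table_ready) {
    for (uint32_t i = 0; i < 256; ++i) table[i] = Crc32cTableEntry(i);
    table_ready = true;
  }
  while (len--) {
    crc = table[(crc ^ *p++) & 0xffu] ^ (crc >> 8);
  }
  return crc;
}

On real POWER8 hardware, cross-checking crc32c_ppc() (with its internal
0xffffffff inversions accounted for) against this reference over random
lengths and alignments is a straightforward way to validate the vector path.
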
diff --git a/util/crc32c_ppc.h b/util/crc32c_ppc.h
deleted file mode 100644
index c359061c61..0000000000
--- a/util/crc32c_ppc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-// Copyright (c) 2017 International Business Machines Corp.
-// All rights reserved.
-// This source code is licensed under both the GPLv2 (found in the
-// COPYING file in the root directory) and Apache 2.0 License
-// (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-extern uint32_t crc32c_ppc(uint32_t crc, unsigned char const *buffer,
- unsigned len);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/util/crc32c_ppc_asm.S b/util/crc32c_ppc_asm.S
deleted file mode 100644
index a317bf96b8..0000000000
--- a/util/crc32c_ppc_asm.S
+++ /dev/null
@@ -1,752 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-// Copyright (c) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
-// Copyright (c) 2017 International Business Machines Corp.
-// All rights reserved.
-// This source code is licensed under both the GPLv2 (found in the
-// COPYING file in the root directory) and Apache 2.0 License
-// (found in the LICENSE.Apache file in the root directory).
-
-#include <ppc-asm.h>
-#include "ppc-opcode.h"
-
-#undef toc
-
-#ifndef r1
-#define r1 1
-#endif
-
-#ifndef r2
-#define r2 2
-#endif
-
- .section .rodata
-.balign 16
-
-.byteswap_constant:
- /* byte reverse permute constant */
- .octa 0x0F0E0D0C0B0A09080706050403020100
-
-#define __ASSEMBLY__
-#include "crc32c_ppc_constants.h"
-
- .text
-
-#if defined(__BIG_ENDIAN__) && defined(REFLECT)
-#define BYTESWAP_DATA
-#elif defined(__LITTLE_ENDIAN__) && !defined(REFLECT)
-#define BYTESWAP_DATA
-#else
-#undef BYTESWAP_DATA
-#endif
-
-#define off16 r25
-#define off32 r26
-#define off48 r27
-#define off64 r28
-#define off80 r29
-#define off96 r30
-#define off112 r31
-
-#define const1 v24
-#define const2 v25
-
-#define byteswap v26
-#define mask_32bit v27
-#define mask_64bit v28
-#define zeroes v29
-
-#ifdef BYTESWAP_DATA
-#define VPERM(A, B, C, D) vperm A, B, C, D
-#else
-#define VPERM(A, B, C, D)
-#endif
-
-/* unsigned int __crc32_vpmsum(unsigned int crc, void *p, unsigned long len) */
-FUNC_START(__crc32_vpmsum)
- std r31,-8(r1)
- std r30,-16(r1)
- std r29,-24(r1)
- std r28,-32(r1)
- std r27,-40(r1)
- std r26,-48(r1)
- std r25,-56(r1)
-
- li off16,16
- li off32,32
- li off48,48
- li off64,64
- li off80,80
- li off96,96
- li off112,112
- li r0,0
-
- /* Enough room for saving 10 non volatile VMX registers */
- subi r6,r1,56+10*16
- subi r7,r1,56+2*16
-
- stvx v20,0,r6
- stvx v21,off16,r6
- stvx v22,off32,r6
- stvx v23,off48,r6
- stvx v24,off64,r6
- stvx v25,off80,r6
- stvx v26,off96,r6
- stvx v27,off112,r6
- stvx v28,0,r7
- stvx v29,off16,r7
-
- mr r10,r3
-
- vxor zeroes,zeroes,zeroes
- vspltisw v0,-1
-
- vsldoi mask_32bit,zeroes,v0,4
- vsldoi mask_64bit,zeroes,v0,8
-
- /* Get the initial value into v8 */
- vxor v8,v8,v8
- MTVRD(v8, r3)
-#ifdef REFLECT
- vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */
-#else
- vsldoi v8,v8,zeroes,4 /* shift into top 32 bits */
-#endif
-
-#ifdef BYTESWAP_DATA
- addis r3,r2,.byteswap_constant@toc@ha
- addi r3,r3,.byteswap_constant@toc@l
-
- lvx byteswap,0,r3
- addi r3,r3,16
-#endif
-
- cmpdi r5,256
- blt .Lshort
-
- rldicr r6,r5,0,56
-
- /* Checksum in blocks of MAX_SIZE */
-1: lis r7,MAX_SIZE@h
- ori r7,r7,MAX_SIZE@l
- mr r9,r7
- cmpd r6,r7
- bgt 2f
- mr r7,r6
-2: subf r6,r7,r6
-
- /* our main loop does 128 bytes at a time */
- srdi r7,r7,7
-
- /*
- * Work out the offset into the constants table to start at. Each
- * constant is 16 bytes, and it is used against 128 bytes of input
- * data - 128 / 16 = 8
- */
- sldi r8,r7,4
- srdi r9,r9,3
- subf r8,r8,r9
-
- /* We reduce our final 128 bytes in a separate step */
- addi r7,r7,-1
- mtctr r7
-
- addis r3,r2,.constants@toc@ha
- addi r3,r3,.constants@toc@l
-
- /* Find the start of our constants */
- add r3,r3,r8
-
- /* zero v0-v7 which will contain our checksums */
- vxor v0,v0,v0
- vxor v1,v1,v1
- vxor v2,v2,v2
- vxor v3,v3,v3
- vxor v4,v4,v4
- vxor v5,v5,v5
- vxor v6,v6,v6
- vxor v7,v7,v7
-
- lvx const1,0,r3
-
- /*
- * If we are looping back to consume more data we use the values
- * already in v16-v23.
- */
- cmpdi r0,1
- beq 2f
-
- /* First warm up pass */
- lvx v16,0,r4
- lvx v17,off16,r4
- VPERM(v16,v16,v16,byteswap)
- VPERM(v17,v17,v17,byteswap)
- lvx v18,off32,r4
- lvx v19,off48,r4
- VPERM(v18,v18,v18,byteswap)
- VPERM(v19,v19,v19,byteswap)
- lvx v20,off64,r4
- lvx v21,off80,r4
- VPERM(v20,v20,v20,byteswap)
- VPERM(v21,v21,v21,byteswap)
- lvx v22,off96,r4
- lvx v23,off112,r4
- VPERM(v22,v22,v22,byteswap)
- VPERM(v23,v23,v23,byteswap)
- addi r4,r4,8*16
-
- /* xor in initial value */
- vxor v16,v16,v8
-
-2: bdz .Lfirst_warm_up_done
-
- addi r3,r3,16
- lvx const2,0,r3
-
- /* Second warm up pass */
- VPMSUMD(v8,v16,const1)
- lvx v16,0,r4
- VPERM(v16,v16,v16,byteswap)
- ori r2,r2,0
-
- VPMSUMD(v9,v17,const1)
- lvx v17,off16,r4
- VPERM(v17,v17,v17,byteswap)
- ori r2,r2,0
-
- VPMSUMD(v10,v18,const1)
- lvx v18,off32,r4
- VPERM(v18,v18,v18,byteswap)
- ori r2,r2,0
-
- VPMSUMD(v11,v19,const1)
- lvx v19,off48,r4
- VPERM(v19,v19,v19,byteswap)
- ori r2,r2,0
-
- VPMSUMD(v12,v20,const1)
- lvx v20,off64,r4
- VPERM(v20,v20,v20,byteswap)
- ori r2,r2,0
-
- VPMSUMD(v13,v21,const1)
- lvx v21,off80,r4
- VPERM(v21,v21,v21,byteswap)
- ori r2,r2,0
-
- VPMSUMD(v14,v22,const1)
- lvx v22,off96,r4
- VPERM(v22,v22,v22,byteswap)
- ori r2,r2,0
-
- VPMSUMD(v15,v23,const1)
- lvx v23,off112,r4
- VPERM(v23,v23,v23,byteswap)
-
- addi r4,r4,8*16
-
- bdz .Lfirst_cool_down
-
- /*
- * main loop. We modulo schedule it such that it takes three iterations
- * to complete - first iteration load, second iteration vpmsum, third
- * iteration xor.
- */
- .balign 16
-4: lvx const1,0,r3
- addi r3,r3,16
- ori r2,r2,0
-
- vxor v0,v0,v8
- VPMSUMD(v8,v16,const2)
- lvx v16,0,r4
- VPERM(v16,v16,v16,byteswap)
- ori r2,r2,0
-
- vxor v1,v1,v9
- VPMSUMD(v9,v17,const2)
- lvx v17,off16,r4
- VPERM(v17,v17,v17,byteswap)
- ori r2,r2,0
-
- vxor v2,v2,v10
- VPMSUMD(v10,v18,const2)
- lvx v18,off32,r4
- VPERM(v18,v18,v18,byteswap)
- ori r2,r2,0
-
- vxor v3,v3,v11
- VPMSUMD(v11,v19,const2)
- lvx v19,off48,r4
- VPERM(v19,v19,v19,byteswap)
- lvx const2,0,r3
- ori r2,r2,0
-
- vxor v4,v4,v12
- VPMSUMD(v12,v20,const1)
- lvx v20,off64,r4
- VPERM(v20,v20,v20,byteswap)
- ori r2,r2,0
-
- vxor v5,v5,v13
- VPMSUMD(v13,v21,const1)
- lvx v21,off80,r4
- VPERM(v21,v21,v21,byteswap)
- ori r2,r2,0
-
- vxor v6,v6,v14
- VPMSUMD(v14,v22,const1)
- lvx v22,off96,r4
- VPERM(v22,v22,v22,byteswap)
- ori r2,r2,0
-
- vxor v7,v7,v15
- VPMSUMD(v15,v23,const1)
- lvx v23,off112,r4
- VPERM(v23,v23,v23,byteswap)
-
- addi r4,r4,8*16
-
- bdnz 4b
-
-.Lfirst_cool_down:
- /* First cool down pass */
- lvx const1,0,r3
- addi r3,r3,16
-
- vxor v0,v0,v8
- VPMSUMD(v8,v16,const1)
- ori r2,r2,0
-
- vxor v1,v1,v9
- VPMSUMD(v9,v17,const1)
- ori r2,r2,0
-
- vxor v2,v2,v10
- VPMSUMD(v10,v18,const1)
- ori r2,r2,0
-
- vxor v3,v3,v11
- VPMSUMD(v11,v19,const1)
- ori r2,r2,0
-
- vxor v4,v4,v12
- VPMSUMD(v12,v20,const1)
- ori r2,r2,0
-
- vxor v5,v5,v13
- VPMSUMD(v13,v21,const1)
- ori r2,r2,0
-
- vxor v6,v6,v14
- VPMSUMD(v14,v22,const1)
- ori r2,r2,0
-
- vxor v7,v7,v15
- VPMSUMD(v15,v23,const1)
- ori r2,r2,0
-
-.Lsecond_cool_down:
- /* Second cool down pass */
- vxor v0,v0,v8
- vxor v1,v1,v9
- vxor v2,v2,v10
- vxor v3,v3,v11
- vxor v4,v4,v12
- vxor v5,v5,v13
- vxor v6,v6,v14
- vxor v7,v7,v15
-
-#ifdef REFLECT
- /*
- * vpmsumd produces a 96 bit result in the least significant bits
- * of the register. Since we are bit reflected we have to shift it
- * left 32 bits so it occupies the least significant bits in the
- * bit reflected domain.
- */
- vsldoi v0,v0,zeroes,4
- vsldoi v1,v1,zeroes,4
- vsldoi v2,v2,zeroes,4
- vsldoi v3,v3,zeroes,4
- vsldoi v4,v4,zeroes,4
- vsldoi v5,v5,zeroes,4
- vsldoi v6,v6,zeroes,4
- vsldoi v7,v7,zeroes,4
-#endif
-
- /* xor with last 1024 bits */
- lvx v8,0,r4
- lvx v9,off16,r4
- VPERM(v8,v8,v8,byteswap)
- VPERM(v9,v9,v9,byteswap)
- lvx v10,off32,r4
- lvx v11,off48,r4
- VPERM(v10,v10,v10,byteswap)
- VPERM(v11,v11,v11,byteswap)
- lvx v12,off64,r4
- lvx v13,off80,r4
- VPERM(v12,v12,v12,byteswap)
- VPERM(v13,v13,v13,byteswap)
- lvx v14,off96,r4
- lvx v15,off112,r4
- VPERM(v14,v14,v14,byteswap)
- VPERM(v15,v15,v15,byteswap)
-
- addi r4,r4,8*16
-
- vxor v16,v0,v8
- vxor v17,v1,v9
- vxor v18,v2,v10
- vxor v19,v3,v11
- vxor v20,v4,v12
- vxor v21,v5,v13
- vxor v22,v6,v14
- vxor v23,v7,v15
-
- li r0,1
- cmpdi r6,0
- addi r6,r6,128
- bne 1b
-
- /* Work out how many bytes we have left */
- andi. r5,r5,127
-
- /* Calculate where in the constant table we need to start */
- subfic r6,r5,128
- add r3,r3,r6
-
- /* How many 16 byte chunks are in the tail */
- srdi r7,r5,4
- mtctr r7
-
- /*
- * Reduce the previously calculated 1024 bits to 64 bits, shifting
- * 32 bits to include the trailing 32 bits of zeros
- */
- lvx v0,0,r3
- lvx v1,off16,r3
- lvx v2,off32,r3
- lvx v3,off48,r3
- lvx v4,off64,r3
- lvx v5,off80,r3
- lvx v6,off96,r3
- lvx v7,off112,r3
- addi r3,r3,8*16
-
- VPMSUMW(v0,v16,v0)
- VPMSUMW(v1,v17,v1)
- VPMSUMW(v2,v18,v2)
- VPMSUMW(v3,v19,v3)
- VPMSUMW(v4,v20,v4)
- VPMSUMW(v5,v21,v5)
- VPMSUMW(v6,v22,v6)
- VPMSUMW(v7,v23,v7)
-
- /* Now reduce the tail (0 - 112 bytes) */
- cmpdi r7,0
- beq 1f
-
- lvx v16,0,r4
- lvx v17,0,r3
- VPERM(v16,v16,v16,byteswap)
- VPMSUMW(v16,v16,v17)
- vxor v0,v0,v16
- bdz 1f
-
- lvx v16,off16,r4
- lvx v17,off16,r3
- VPERM(v16,v16,v16,byteswap)
- VPMSUMW(v16,v16,v17)
- vxor v0,v0,v16
- bdz 1f
-
- lvx v16,off32,r4
- lvx v17,off32,r3
- VPERM(v16,v16,v16,byteswap)
- VPMSUMW(v16,v16,v17)
- vxor v0,v0,v16
- bdz 1f
-
- lvx v16,off48,r4
- lvx v17,off48,r3
- VPERM(v16,v16,v16,byteswap)
- VPMSUMW(v16,v16,v17)
- vxor v0,v0,v16
- bdz 1f
-
- lvx v16,off64,r4
- lvx v17,off64,r3
- VPERM(v16,v16,v16,byteswap)
- VPMSUMW(v16,v16,v17)
- vxor v0,v0,v16
- bdz 1f
-
- lvx v16,off80,r4
- lvx v17,off80,r3
- VPERM(v16,v16,v16,byteswap)
- VPMSUMW(v16,v16,v17)
- vxor v0,v0,v16
- bdz 1f
-
- lvx v16,off96,r4
- lvx v17,off96,r3
- VPERM(v16,v16,v16,byteswap)
- VPMSUMW(v16,v16,v17)
- vxor v0,v0,v16
-
- /* Now xor all the parallel chunks together */
-1: vxor v0,v0,v1
- vxor v2,v2,v3
- vxor v4,v4,v5
- vxor v6,v6,v7
-
- vxor v0,v0,v2
- vxor v4,v4,v6
-
- vxor v0,v0,v4
-
-.Lbarrett_reduction:
- /* Barrett constants */
- addis r3,r2,.barrett_constants@toc@ha
- addi r3,r3,.barrett_constants@toc@l
-
- lvx const1,0,r3
- lvx const2,off16,r3
-
- vsldoi v1,v0,v0,8
- vxor v0,v0,v1 /* xor two 64 bit results together */
-
-#ifdef REFLECT
- /* shift left one bit */
- vspltisb v1,1
- vsl v0,v0,v1
-#endif
-
- vand v0,v0,mask_64bit
-
-#ifndef REFLECT
- /*
- * Now for the Barrett reduction algorithm. The idea is to calculate q,
- * the multiple of our polynomial that we need to subtract. By
- * doing the computation 2x bits higher (ie 64 bits) and shifting the
- * result back down 2x bits, we round down to the nearest multiple.
- */
- VPMSUMD(v1,v0,const1) /* ma */
- vsldoi v1,zeroes,v1,8 /* q = floor(ma/(2^64)) */
- VPMSUMD(v1,v1,const2) /* qn */
- vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */
-
- /*
- * Get the result into r3. We need to shift it left 8 bytes:
- * V0 [ 0 1 2 X ]
- * V0 [ 0 X 2 3 ]
- */
- vsldoi v0,v0,zeroes,8 /* shift result into top 64 bits */
-#else
- /*
- * The reflected version of Barrett reduction. Instead of bit
- * reflecting our data (which is expensive to do), we bit reflect our
- * constants and our algorithm, which means the intermediate data in
- * our vector registers goes from 0-63 instead of 63-0. We can reflect
- * the algorithm because we don't carry in mod 2 arithmetic.
- */
- vand v1,v0,mask_32bit /* bottom 32 bits of a */
- VPMSUMD(v1,v1,const1) /* ma */
- vand v1,v1,mask_32bit /* bottom 32bits of ma */
- VPMSUMD(v1,v1,const2) /* qn */
- vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */
-
- /*
- * Since we are bit reflected, the result (ie the low 32 bits) is in
- * the high 32 bits. We just need to shift it left 4 bytes
- * V0 [ 0 1 X 3 ]
- * V0 [ 0 X 2 3 ]
- */
- vsldoi v0,v0,zeroes,4 /* shift result into top 64 bits of */
-#endif
-
- /* Get it into r3 */
- MFVRD(r3, v0)
-
-.Lout:
- subi r6,r1,56+10*16
- subi r7,r1,56+2*16
-
- lvx v20,0,r6
- lvx v21,off16,r6
- lvx v22,off32,r6
- lvx v23,off48,r6
- lvx v24,off64,r6
- lvx v25,off80,r6
- lvx v26,off96,r6
- lvx v27,off112,r6
- lvx v28,0,r7
- lvx v29,off16,r7
-
- ld r31,-8(r1)
- ld r30,-16(r1)
- ld r29,-24(r1)
- ld r28,-32(r1)
- ld r27,-40(r1)
- ld r26,-48(r1)
- ld r25,-56(r1)
-
- blr
-
-.Lfirst_warm_up_done:
- lvx const1,0,r3
- addi r3,r3,16
-
- VPMSUMD(v8,v16,const1)
- VPMSUMD(v9,v17,const1)
- VPMSUMD(v10,v18,const1)
- VPMSUMD(v11,v19,const1)
- VPMSUMD(v12,v20,const1)
- VPMSUMD(v13,v21,const1)
- VPMSUMD(v14,v22,const1)
- VPMSUMD(v15,v23,const1)
-
- b .Lsecond_cool_down
-
-.Lshort:
- cmpdi r5,0
- beq .Lzero
-
- addis r3,r2,.short_constants@toc@ha
- addi r3,r3,.short_constants@toc@l
-
- /* Calculate where in the constant table we need to start */
- subfic r6,r5,256
- add r3,r3,r6
-
- /* How many 16 byte chunks? */
- srdi r7,r5,4
- mtctr r7
-
- vxor v19,v19,v19
- vxor v20,v20,v20
-
- lvx v0,0,r4
- lvx v16,0,r3
- VPERM(v0,v0,v16,byteswap)
- vxor v0,v0,v8 /* xor in initial value */
- VPMSUMW(v0,v0,v16)
- bdz .Lv0
-
- lvx v1,off16,r4
- lvx v17,off16,r3
- VPERM(v1,v1,v17,byteswap)
- VPMSUMW(v1,v1,v17)
- bdz .Lv1
-
- lvx v2,off32,r4
- lvx v16,off32,r3
- VPERM(v2,v2,v16,byteswap)
- VPMSUMW(v2,v2,v16)
- bdz .Lv2
-
- lvx v3,off48,r4
- lvx v17,off48,r3
- VPERM(v3,v3,v17,byteswap)
- VPMSUMW(v3,v3,v17)
- bdz .Lv3
-
- lvx v4,off64,r4
- lvx v16,off64,r3
- VPERM(v4,v4,v16,byteswap)
- VPMSUMW(v4,v4,v16)
- bdz .Lv4
-
- lvx v5,off80,r4
- lvx v17,off80,r3
- VPERM(v5,v5,v17,byteswap)
- VPMSUMW(v5,v5,v17)
- bdz .Lv5
-
- lvx v6,off96,r4
- lvx v16,off96,r3
- VPERM(v6,v6,v16,byteswap)
- VPMSUMW(v6,v6,v16)
- bdz .Lv6
-
- lvx v7,off112,r4
- lvx v17,off112,r3
- VPERM(v7,v7,v17,byteswap)
- VPMSUMW(v7,v7,v17)
- bdz .Lv7
-
- addi r3,r3,128
- addi r4,r4,128
-
- lvx v8,0,r4
- lvx v16,0,r3
- VPERM(v8,v8,v16,byteswap)
- VPMSUMW(v8,v8,v16)
- bdz .Lv8
-
- lvx v9,off16,r4
- lvx v17,off16,r3
- VPERM(v9,v9,v17,byteswap)
- VPMSUMW(v9,v9,v17)
- bdz .Lv9
-
- lvx v10,off32,r4
- lvx v16,off32,r3
- VPERM(v10,v10,v16,byteswap)
- VPMSUMW(v10,v10,v16)
- bdz .Lv10
-
- lvx v11,off48,r4
- lvx v17,off48,r3
- VPERM(v11,v11,v17,byteswap)
- VPMSUMW(v11,v11,v17)
- bdz .Lv11
-
- lvx v12,off64,r4
- lvx v16,off64,r3
- VPERM(v12,v12,v16,byteswap)
- VPMSUMW(v12,v12,v16)
- bdz .Lv12
-
- lvx v13,off80,r4
- lvx v17,off80,r3
- VPERM(v13,v13,v17,byteswap)
- VPMSUMW(v13,v13,v17)
- bdz .Lv13
-
- lvx v14,off96,r4
- lvx v16,off96,r3
- VPERM(v14,v14,v16,byteswap)
- VPMSUMW(v14,v14,v16)
- bdz .Lv14
-
- lvx v15,off112,r4
- lvx v17,off112,r3
- VPERM(v15,v15,v17,byteswap)
- VPMSUMW(v15,v15,v17)
-
-.Lv15: vxor v19,v19,v15
-.Lv14: vxor v20,v20,v14
-.Lv13: vxor v19,v19,v13
-.Lv12: vxor v20,v20,v12
-.Lv11: vxor v19,v19,v11
-.Lv10: vxor v20,v20,v10
-.Lv9: vxor v19,v19,v9
-.Lv8: vxor v20,v20,v8
-.Lv7: vxor v19,v19,v7
-.Lv6: vxor v20,v20,v6
-.Lv5: vxor v19,v19,v5
-.Lv4: vxor v20,v20,v4
-.Lv3: vxor v19,v19,v3
-.Lv2: vxor v20,v20,v2
-.Lv1: vxor v19,v19,v1
-.Lv0: vxor v20,v20,v0
-
- vxor v0,v19,v20
-
- b .Lbarrett_reduction
-
-.Lzero:
- mr r3,r10
- b .Lout
-
-FUNC_END(__crc32_vpmsum)
diff --git a/util/crc32c_ppc_clang_workaround.h b/util/crc32c_ppc_clang_workaround.h
new file mode 100644
index 0000000000..110cb31782
--- /dev/null
+++ b/util/crc32c_ppc_clang_workaround.h
@@ -0,0 +1,85 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+// Copyright (C) 2015, 2017 International Business Machines Corp.
+// All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+#ifndef CLANG_WORKAROUND_H
+#define CLANG_WORKAROUND_H
+
+/*
+ * These stubs fix clang incompatibilities with GCC builtins.
+ */
+
+#ifndef __builtin_crypto_vpmsumw
+#define __builtin_crypto_vpmsumw __builtin_crypto_vpmsumb
+#endif
+#ifndef __builtin_crypto_vpmsumd
+#define __builtin_crypto_vpmsumd __builtin_crypto_vpmsumb
+#endif
+
+static inline __vector unsigned long long __attribute__((overloadable))
+vec_ld(int __a, const __vector unsigned long long* __b) {
+ return (__vector unsigned long long)__builtin_altivec_lvx(__a, __b);
+}
+
+/*
+ * GCC __builtin_pack_vector_int128 returns a vector __int128_t but Clang
+ * does not recognize this type. On GCC this builtin is translated to a
+ * xxpermdi instruction that only moves the registers __a, __b instead of
+ * generating a load.
+ *
+ * Clang has vec_xxpermdi intrinsics. It was implemented in 4.0.0.
+ */
+static inline __vector unsigned long long __builtin_pack_vector(
+ unsigned long __a, unsigned long __b) {
+#if defined(__BIG_ENDIAN__)
+ __vector unsigned long long __v = {__a, __b};
+#else
+ __vector unsigned long long __v = {__b, __a};
+#endif
+ return __v;
+}
+
+/*
+ * Clang 7 changed the behavior of vec_xxpermdi in order to provide the same
+ * behavior of GCC. That means code adapted to Clang >= 7 does not work on
+ * Clang <= 6. So, fallback to __builtin_unpack_vector() on Clang <= 6.
+ */
+#if !defined vec_xxpermdi || __clang_major__ <= 6
+
+static inline unsigned long __builtin_unpack_vector(
+ __vector unsigned long long __v, int __o) {
+ return __v[__o];
+}
+
+#if defined(__BIG_ENDIAN__)
+#define __builtin_unpack_vector_0(a) __builtin_unpack_vector((a), 0)
+#define __builtin_unpack_vector_1(a) __builtin_unpack_vector((a), 1)
+#else
+#define __builtin_unpack_vector_0(a) __builtin_unpack_vector((a), 1)
+#define __builtin_unpack_vector_1(a) __builtin_unpack_vector((a), 0)
+#endif
+
+#else
+
+static inline unsigned long __builtin_unpack_vector_0(
+ __vector unsigned long long __v) {
+#if defined(__BIG_ENDIAN__)
+ return vec_xxpermdi(__v, __v, 0x0)[0];
+#else
+ return vec_xxpermdi(__v, __v, 0x3)[0];
+#endif
+}
+
+static inline unsigned long __builtin_unpack_vector_1(
+ __vector unsigned long long __v) {
+#if defined(__BIG_ENDIAN__)
+ return vec_xxpermdi(__v, __v, 0x3)[0];
+#else
+ return vec_xxpermdi(__v, __v, 0x0)[0];
+#endif
+}
+#endif /* vec_xxpermdi */
+
+#endif
diff --git a/util/crc32c_ppc_constants.h b/util/crc32c_ppc_constants.h
index f6494cd01c..14df7ccda5 100644
--- a/util/crc32c_ppc_constants.h
+++ b/util/crc32c_ppc_constants.h
@@ -5,13 +5,21 @@
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
-#pragma once
+/*
+*
+* THIS FILE IS GENERATED WITH
+./crc32_constants -r -x 0x1edc6f41
+
+* This is from https://github.com/antonblanchard/crc32-vpmsum/
+* DO NOT MODIFY IT MANUALLY!
+*
+*/
#define CRC 0x1edc6f41
-#define REFLECT
#define CRC_XOR
+#define REFLECT
+#define MAX_SIZE 32768
-#ifndef __ASSEMBLY__
#ifdef CRC_TABLE
static const unsigned int crc_table[] = {
0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c,
@@ -59,842 +67,1125 @@ static const unsigned int crc_table[] = {
0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351,
};
-#endif
+#endif /* CRC_TABLE */
+#ifdef POWER8_INTRINSICS
-#else
-#define MAX_SIZE 32768
-.constants :
+/* Constants */
- /* Reduce 262144 kbits to 1024 bits */
+/* Reduce 262144 kbits to 1024 bits */
+static const __vector unsigned long long vcrc_const[255]
+ __attribute__((aligned(16))) = {
+#ifdef __LITTLE_ENDIAN__
/* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */
- .octa 0x00000000b6ca9e20000000009c37c408
-
+ {0x000000009c37c408, 0x00000000b6ca9e20},
/* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */
- .octa 0x00000000350249a800000001b51df26c
-
+ {0x00000001b51df26c, 0x00000000350249a8},
/* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */
- .octa 0x00000001862dac54000000000724b9d0
-
+ {0x000000000724b9d0, 0x00000001862dac54},
/* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */
- .octa 0x00000001d87fb48c00000001c00532fe
-
+ {0x00000001c00532fe, 0x00000001d87fb48c},
/* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */
- .octa 0x00000001f39b699e00000000f05a9362
-
+ {0x00000000f05a9362, 0x00000001f39b699e},
/* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */
- .octa 0x0000000101da11b400000001e1007970
-
+ {0x00000001e1007970, 0x0000000101da11b4},
/* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */
- .octa 0x00000001cab571e000000000a57366ee
-
+ {0x00000000a57366ee, 0x00000001cab571e0},
/* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */
- .octa 0x00000000c7020cfe0000000192011284
-
+ {0x0000000192011284, 0x00000000c7020cfe},
/* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */
- .octa 0x00000000cdaed1ae0000000162716d9a
-
+ {0x0000000162716d9a, 0x00000000cdaed1ae},
/* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */
- .octa 0x00000001e804effc00000000cd97ecde
-
+ {0x00000000cd97ecde, 0x00000001e804effc},
/* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */
- .octa 0x0000000077c3ea3a0000000058812bc0
-
+ {0x0000000058812bc0, 0x0000000077c3ea3a},
/* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */
- .octa 0x0000000068df31b40000000088b8c12e
-
+ {0x0000000088b8c12e, 0x0000000068df31b4},
/* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */
- .octa 0x00000000b059b6c200000001230b234c
-
+ {0x00000001230b234c, 0x00000000b059b6c2},
/* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */
- .octa 0x0000000145fb8ed800000001120b416e
-
+ {0x00000001120b416e, 0x0000000145fb8ed8},
/* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */
- .octa 0x00000000cbc0916800000001974aecb0
-
+ {0x00000001974aecb0, 0x00000000cbc09168},
/* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */
- .octa 0x000000005ceeedc2000000008ee3f226
-
+ {0x000000008ee3f226, 0x000000005ceeedc2},
/* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */
- .octa 0x0000000047d74e8600000001089aba9a
-
+ {0x00000001089aba9a, 0x0000000047d74e86},
/* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */
- .octa 0x00000001407e9e220000000065113872
-
+ {0x0000000065113872, 0x00000001407e9e22},
/* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */
- .octa 0x00000001da967bda000000005c07ec10
-
+ {0x000000005c07ec10, 0x00000001da967bda},
/* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */
- .octa 0x000000006c8983680000000187590924
-
+ {0x0000000187590924, 0x000000006c898368},
/* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */
- .octa 0x00000000f2d14c9800000000e35da7c6
-
+ {0x00000000e35da7c6, 0x00000000f2d14c98},
/* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */
- .octa 0x00000001993c6ad4000000000415855a
-
+ {0x000000000415855a, 0x00000001993c6ad4},
/* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */
- .octa 0x000000014683d1ac0000000073617758
-
+ {0x0000000073617758, 0x000000014683d1ac},
/* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */
- .octa 0x00000001a7c93e6c0000000176021d28
-
+ {0x0000000176021d28, 0x00000001a7c93e6c},
/* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */
- .octa 0x000000010211e90a00000001c358fd0a
-
+ {0x00000001c358fd0a, 0x000000010211e90a},
/* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */
- .octa 0x000000001119403e00000001ff7a2c18
-
+ {0x00000001ff7a2c18, 0x000000001119403e},
/* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */
- .octa 0x000000001c3261aa00000000f2d9f7e4
-
+ {0x00000000f2d9f7e4, 0x000000001c3261aa},
/* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */
- .octa 0x000000014e37a634000000016cf1f9c8
-
+ {0x000000016cf1f9c8, 0x000000014e37a634},
/* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */
- .octa 0x0000000073786c0c000000010af9279a
-
+ {0x000000010af9279a, 0x0000000073786c0c},
/* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */
- .octa 0x000000011dc037f80000000004f101e8
-
+ {0x0000000004f101e8, 0x000000011dc037f8},
/* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */
- .octa 0x0000000031433dfc0000000070bcf184
-
+ {0x0000000070bcf184, 0x0000000031433dfc},
/* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */
- .octa 0x000000009cde8348000000000a8de642
-
+ {0x000000000a8de642, 0x000000009cde8348},
/* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */
- .octa 0x0000000038d3c2a60000000062ea130c
-
+ {0x0000000062ea130c, 0x0000000038d3c2a6},
/* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */
- .octa 0x000000011b25f26000000001eb31cbb2
-
+ {0x00000001eb31cbb2, 0x000000011b25f260},
/* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */
- .octa 0x000000001629e6f00000000170783448
-
+ {0x0000000170783448, 0x000000001629e6f0},
/* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */
- .octa 0x0000000160838b4c00000001a684b4c6
-
+ {0x00000001a684b4c6, 0x0000000160838b4c},
/* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */
- .octa 0x000000007a44011c00000000253ca5b4
-
+ {0x00000000253ca5b4, 0x000000007a44011c},
/* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */
- .octa 0x00000000226f417a0000000057b4b1e2
-
+ {0x0000000057b4b1e2, 0x00000000226f417a},
/* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */
- .octa 0x0000000045eb2eb400000000b6bd084c
-
+ {0x00000000b6bd084c, 0x0000000045eb2eb4},
/* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */
- .octa 0x000000014459d70c0000000123c2d592
-
+ {0x0000000123c2d592, 0x000000014459d70c},
/* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */
- .octa 0x00000001d406ed8200000000159dafce
-
+ {0x00000000159dafce, 0x00000001d406ed82},
/* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */
- .octa 0x0000000160c8e1a80000000127e1a64e
-
+ {0x0000000127e1a64e, 0x0000000160c8e1a8},
/* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */
- .octa 0x0000000027ba80980000000056860754
-
+ {0x0000000056860754, 0x0000000027ba8098},
/* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */
- .octa 0x000000006d92d01800000001e661aae8
-
+ {0x00000001e661aae8, 0x000000006d92d018},
/* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */
- .octa 0x000000012ed7e3f200000000f82c6166
-
+ {0x00000000f82c6166, 0x000000012ed7e3f2},
/* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */
- .octa 0x000000002dc8778800000000c4f9c7ae
-
+ {0x00000000c4f9c7ae, 0x000000002dc87788},
/* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */
- .octa 0x0000000018240bb80000000074203d20
-
+ {0x0000000074203d20, 0x0000000018240bb8},
/* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */
- .octa 0x000000001ad381580000000198173052
-
+ {0x0000000198173052, 0x000000001ad38158},
/* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */
- .octa 0x00000001396b78f200000001ce8aba54
-
+ {0x00000001ce8aba54, 0x00000001396b78f2},
/* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */
- .octa 0x000000011a68133400000001850d5d94
-
+ {0x00000001850d5d94, 0x000000011a681334},
/* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */
- .octa 0x000000012104732e00000001d609239c
-
+ {0x00000001d609239c, 0x000000012104732e},
/* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */
- .octa 0x00000000a140d90c000000001595f048
-
+ {0x000000001595f048, 0x00000000a140d90c},
/* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */
- .octa 0x00000001b7215eda0000000042ccee08
-
+ {0x0000000042ccee08, 0x00000001b7215eda},
/* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */
- .octa 0x00000001aaf1df3c000000010a389d74
-
+ {0x000000010a389d74, 0x00000001aaf1df3c},
/* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */
- .octa 0x0000000029d15b8a000000012a840da6
-
+ {0x000000012a840da6, 0x0000000029d15b8a},
/* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */
- .octa 0x00000000f1a96922000000001d181c0c
-
+ {0x000000001d181c0c, 0x00000000f1a96922},
/* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */
- .octa 0x00000001ac80d03c0000000068b7d1f6
-
+ {0x0000000068b7d1f6, 0x00000001ac80d03c},
/* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */
- .octa 0x000000000f11d56a000000005b0f14fc
-
+ {0x000000005b0f14fc, 0x000000000f11d56a},
/* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */
- .octa 0x00000001f1c022a20000000179e9e730
-
+ {0x0000000179e9e730, 0x00000001f1c022a2},
/* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */
- .octa 0x0000000173d00ae200000001ce1368d6
-
+ {0x00000001ce1368d6, 0x0000000173d00ae2},
/* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */
- .octa 0x00000001d4ffe4ac0000000112c3a84c
-
+ {0x0000000112c3a84c, 0x00000001d4ffe4ac},
/* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */
- .octa 0x000000016edc5ae400000000de940fee
-
+ {0x00000000de940fee, 0x000000016edc5ae4},
/* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */
- .octa 0x00000001f1a0214000000000fe896b7e
-
+ {0x00000000fe896b7e, 0x00000001f1a02140},
/* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */
- .octa 0x00000000ca0b28a000000001f797431c
-
+ {0x00000001f797431c, 0x00000000ca0b28a0},
/* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */
- .octa 0x00000001928e30a20000000053e989ba
-
+ {0x0000000053e989ba, 0x00000001928e30a2},
/* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */
- .octa 0x0000000097b1b002000000003920cd16
-
+ {0x000000003920cd16, 0x0000000097b1b002},
/* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */
- .octa 0x00000000b15bf90600000001e6f579b8
-
+ {0x00000001e6f579b8, 0x00000000b15bf906},
/* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */
- .octa 0x00000000411c5d52000000007493cb0a
-
+ {0x000000007493cb0a, 0x00000000411c5d52},
/* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */
- .octa 0x00000001c36f330000000001bdd376d8
-
+ {0x00000001bdd376d8, 0x00000001c36f3300},
/* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */
- .octa 0x00000001119227e0000000016badfee6
-
+ {0x000000016badfee6, 0x00000001119227e0},
/* x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1 */
- .octa 0x00000000114d47020000000071de5c58
-
+ {0x0000000071de5c58, 0x00000000114d4702},
/* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */
- .octa 0x00000000458b5b9800000000453f317c
-
+ {0x00000000453f317c, 0x00000000458b5b98},
/* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */
- .octa 0x000000012e31fb8e0000000121675cce
-
+ {0x0000000121675cce, 0x000000012e31fb8e},
/* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */
- .octa 0x000000005cf619d800000001f409ee92
-
+ {0x00000001f409ee92, 0x000000005cf619d8},
/* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */
- .octa 0x0000000063f4d8b200000000f36b9c88
-
+ {0x00000000f36b9c88, 0x0000000063f4d8b2},
/* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */
- .octa 0x000000004138dc8a0000000036b398f4
-
+ {0x0000000036b398f4, 0x000000004138dc8a},
/* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */
- .octa 0x00000001d29ee8e000000001748f9adc
-
+ {0x00000001748f9adc, 0x00000001d29ee8e0},
/* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */
- .octa 0x000000006a08ace800000001be94ec00
-
+ {0x00000001be94ec00, 0x000000006a08ace8},
/* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */
- .octa 0x0000000127d4201000000000b74370d6
-
+ {0x00000000b74370d6, 0x0000000127d42010},
/* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */
- .octa 0x0000000019d76b6200000001174d0b98
-
+ {0x00000001174d0b98, 0x0000000019d76b62},
/* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */
- .octa 0x00000001b1471f6e00000000befc06a4
-
+ {0x00000000befc06a4, 0x00000001b1471f6e},
/* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */
- .octa 0x00000001f64c19cc00000001ae125288
-
+ {0x00000001ae125288, 0x00000001f64c19cc},
/* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */
- .octa 0x00000000003c0ea00000000095c19b34
-
+ {0x0000000095c19b34, 0x00000000003c0ea0},
/* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */
- .octa 0x000000014d73abf600000001a78496f2
-
+ {0x00000001a78496f2, 0x000000014d73abf6},
/* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */
- .octa 0x00000001620eb84400000001ac5390a0
-
+ {0x00000001ac5390a0, 0x00000001620eb844},
/* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */
- .octa 0x0000000147655048000000002a80ed6e
-
+ {0x000000002a80ed6e, 0x0000000147655048},
/* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */
- .octa 0x0000000067b5077e00000001fa9b0128
-
+ {0x00000001fa9b0128, 0x0000000067b5077e},
/* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */
- .octa 0x0000000010ffe20600000001ea94929e
-
+ {0x00000001ea94929e, 0x0000000010ffe206},
/* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */
- .octa 0x000000000fee8f1e0000000125f4305c
-
+ {0x0000000125f4305c, 0x000000000fee8f1e},
/* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */
- .octa 0x00000001da26fbae00000001471e2002
-
+ {0x00000001471e2002, 0x00000001da26fbae},
/* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */
- .octa 0x00000001b3a8bd880000000132d2253a
-
+ {0x0000000132d2253a, 0x00000001b3a8bd88},
/* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */
- .octa 0x00000000e8f3898e00000000f26b3592
-
+ {0x00000000f26b3592, 0x00000000e8f3898e},
/* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */
- .octa 0x00000000b0d0d28c00000000bc8b67b0
-
+ {0x00000000bc8b67b0, 0x00000000b0d0d28c},
/* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */
- .octa 0x0000000030f2a798000000013a826ef2
-
+ {0x000000013a826ef2, 0x0000000030f2a798},
/* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */
- .octa 0x000000000fba10020000000081482c84
-
+ {0x0000000081482c84, 0x000000000fba1002},
/* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */
- .octa 0x00000000bdb9bd7200000000e77307c2
-
+ {0x00000000e77307c2, 0x00000000bdb9bd72},
/* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */
- .octa 0x0000000075d3bf5a00000000d4a07ec8
-
+ {0x00000000d4a07ec8, 0x0000000075d3bf5a},
/* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */
- .octa 0x00000000ef1f98a00000000017102100
-
+ {0x0000000017102100, 0x00000000ef1f98a0},
/* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */
- .octa 0x00000000689c760200000000db406486
-
+ {0x00000000db406486, 0x00000000689c7602},
/* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */
- .octa 0x000000016d5fa5fe0000000192db7f88
-
+ {0x0000000192db7f88, 0x000000016d5fa5fe},
/* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */
- .octa 0x00000001d0d2b9ca000000018bf67b1e
-
+ {0x000000018bf67b1e, 0x00000001d0d2b9ca},
/* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */
- .octa 0x0000000041e7b470000000007c09163e
-
+ {0x000000007c09163e, 0x0000000041e7b470},
/* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */
- .octa 0x00000001cbb6495e000000000adac060
-
+ {0x000000000adac060, 0x00000001cbb6495e},
/* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */
- .octa 0x000000010052a0b000000000bd8316ae
-
+ {0x00000000bd8316ae, 0x000000010052a0b0},
/* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */
- .octa 0x00000001d8effb5c000000019f09ab54
-
+ {0x000000019f09ab54, 0x00000001d8effb5c},
/* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */
- .octa 0x00000001d969853c0000000125155542
-
+ {0x0000000125155542, 0x00000001d969853c},
/* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */
- .octa 0x00000000523ccce2000000018fdb5882
-
+ {0x000000018fdb5882, 0x00000000523ccce2},
/* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */
- .octa 0x000000001e2436bc00000000e794b3f4
-
+ {0x00000000e794b3f4, 0x000000001e2436bc},
/* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */
- .octa 0x00000000ddd1c3a2000000016f9bb022
-
+ {0x000000016f9bb022, 0x00000000ddd1c3a2},
/* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */
- .octa 0x0000000019fcfe3800000000290c9978
-
+ {0x00000000290c9978, 0x0000000019fcfe38},
/* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */
- .octa 0x00000001ce95db640000000083c0f350
-
+ {0x0000000083c0f350, 0x00000001ce95db64},
/* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */
- .octa 0x00000000af5828060000000173ea6628
-
+ {0x0000000173ea6628, 0x00000000af582806},
/* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */
- .octa 0x00000001006388f600000001c8b4e00a
-
+ {0x00000001c8b4e00a, 0x00000001006388f6},
/* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */
- .octa 0x0000000179eca00a00000000de95d6aa
-
+ {0x00000000de95d6aa, 0x0000000179eca00a},
/* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */
- .octa 0x0000000122410a6a000000010b7f7248
-
+ {0x000000010b7f7248, 0x0000000122410a6a},
/* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */
- .octa 0x000000004288e87c00000001326e3a06
-
+ {0x00000001326e3a06, 0x000000004288e87c},
/* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */
- .octa 0x000000016c5490da00000000bb62c2e6
-
+ {0x00000000bb62c2e6, 0x000000016c5490da},
/* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */
- .octa 0x00000000d1c71f6e0000000156a4b2c2
-
+ {0x0000000156a4b2c2, 0x00000000d1c71f6e},
/* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */
- .octa 0x00000001b4ce08a6000000011dfe763a
-
+ {0x000000011dfe763a, 0x00000001b4ce08a6},
/* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */
- .octa 0x00000001466ba60c000000007bcca8e2
-
+ {0x000000007bcca8e2, 0x00000001466ba60c},
/* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */
- .octa 0x00000001f6c488a40000000186118faa
-
+ {0x0000000186118faa, 0x00000001f6c488a4},
/* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */
- .octa 0x000000013bfb06820000000111a65a88
-
+ {0x0000000111a65a88, 0x000000013bfb0682},
/* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */
- .octa 0x00000000690e9e54000000003565e1c4
-
+ {0x000000003565e1c4, 0x00000000690e9e54},
/* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */
- .octa 0x00000000281346b6000000012ed02a82
-
+ {0x000000012ed02a82, 0x00000000281346b6},
/* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */
- .octa 0x000000015646402400000000c486ecfc
-
+ {0x00000000c486ecfc, 0x0000000156464024},
/* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */
- .octa 0x000000016063a8dc0000000001b951b2
-
+ {0x0000000001b951b2, 0x000000016063a8dc},
/* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */
- .octa 0x0000000116a663620000000048143916
-
+ {0x0000000048143916, 0x0000000116a66362},
/* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */
- .octa 0x000000017e8aa4d200000001dc2ae124
-
+ {0x00000001dc2ae124, 0x000000017e8aa4d2},
/* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */
- .octa 0x00000001728eb10c00000001416c58d6
-
+ {0x00000001416c58d6, 0x00000001728eb10c},
/* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */
- .octa 0x00000001b08fd7fa00000000a479744a
-
+ {0x00000000a479744a, 0x00000001b08fd7fa},
/* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */
- .octa 0x00000001092a16e80000000096ca3a26
-
+ {0x0000000096ca3a26, 0x00000001092a16e8},
/* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */
- .octa 0x00000000a505637c00000000ff223d4e
-
+ {0x00000000ff223d4e, 0x00000000a505637c},
/* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */
- .octa 0x00000000d94869b2000000010e84da42
-
+ {0x000000010e84da42, 0x00000000d94869b2},
/* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */
- .octa 0x00000001c8b203ae00000001b61ba3d0
-
+ {0x00000001b61ba3d0, 0x00000001c8b203ae},
/* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */
- .octa 0x000000005704aea000000000680f2de8
-
+ {0x00000000680f2de8, 0x000000005704aea0},
/* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */
- .octa 0x000000012e295fa2000000008772a9a8
-
+ {0x000000008772a9a8, 0x000000012e295fa2},
/* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */
- .octa 0x000000011d0908bc0000000155f295bc
-
+ {0x0000000155f295bc, 0x000000011d0908bc},
/* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */
- .octa 0x0000000193ed97ea00000000595f9282
-
+ {0x00000000595f9282, 0x0000000193ed97ea},
/* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */
- .octa 0x000000013a0f1c520000000164b1c25a
-
+ {0x0000000164b1c25a, 0x000000013a0f1c52},
/* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */
- .octa 0x000000010c2c40c000000000fbd67c50
-
+ {0x00000000fbd67c50, 0x000000010c2c40c0},
/* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */
- .octa 0x00000000ff6fac3e0000000096076268
-
+ {0x0000000096076268, 0x00000000ff6fac3e},
/* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */
- .octa 0x000000017b3609c000000001d288e4cc
-
+ {0x00000001d288e4cc, 0x000000017b3609c0},
/* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */
- .octa 0x0000000088c8c92200000001eaac1bdc
-
+ {0x00000001eaac1bdc, 0x0000000088c8c922},
/* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */
- .octa 0x00000001751baae600000001f1ea39e2
-
+ {0x00000001f1ea39e2, 0x00000001751baae6},
/* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */
- .octa 0x000000010795297200000001eb6506fc
-
+ {0x00000001eb6506fc, 0x0000000107952972},
/* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */
- .octa 0x0000000162b00abe000000010f806ffe
-
+ {0x000000010f806ffe, 0x0000000162b00abe},
/* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */
- .octa 0x000000000d7b404c000000010408481e
-
+ {0x000000010408481e, 0x000000000d7b404c},
/* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */
- .octa 0x00000000763b13d40000000188260534
-
+ {0x0000000188260534, 0x00000000763b13d4},
/* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */
- .octa 0x00000000f6dc22d80000000058fc73e0
-
+ {0x0000000058fc73e0, 0x00000000f6dc22d8},
/* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */
- .octa 0x000000007daae06000000000391c59b8
-
+ {0x00000000391c59b8, 0x000000007daae060},
/* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */
- .octa 0x000000013359ab7c000000018b638400
-
+ {0x000000018b638400, 0x000000013359ab7c},
/* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */
- .octa 0x000000008add438a000000011738f5c4
-
+ {0x000000011738f5c4, 0x000000008add438a},
/* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */
- .octa 0x00000001edbefdea000000008cf7c6da
-
+ {0x000000008cf7c6da, 0x00000001edbefdea},
/* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */
- .octa 0x000000004104e0f800000001ef97fb16
-
+ {0x00000001ef97fb16, 0x000000004104e0f8},
/* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */
- .octa 0x00000000b48a82220000000102130e20
-
+ {0x0000000102130e20, 0x00000000b48a8222},
/* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */
- .octa 0x00000001bcb4684400000000db968898
-
+ {0x00000000db968898, 0x00000001bcb46844},
/* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */
- .octa 0x000000013293ce0a00000000b5047b5e
-
+ {0x00000000b5047b5e, 0x000000013293ce0a},
/* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */
- .octa 0x00000001710d0844000000010b90fdb2
-
+ {0x000000010b90fdb2, 0x00000001710d0844},
/* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */
- .octa 0x0000000117907f6e000000004834a32e
-
+ {0x000000004834a32e, 0x0000000117907f6e},
/* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */
- .octa 0x0000000087ddf93e0000000059c8f2b0
-
+ {0x0000000059c8f2b0, 0x0000000087ddf93e},
/* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */
- .octa 0x000000005970e9b00000000122cec508
-
+ {0x0000000122cec508, 0x000000005970e9b0},
/* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */
- .octa 0x0000000185b2b7d0000000000a330cda
-
+ {0x000000000a330cda, 0x0000000185b2b7d0},
/* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */
- .octa 0x00000001dcee0efc000000014a47148c
-
+ {0x000000014a47148c, 0x00000001dcee0efc},
/* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */
- .octa 0x0000000030da27220000000042c61cb8
-
+ {0x0000000042c61cb8, 0x0000000030da2722},
/* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */
- .octa 0x000000012f925a180000000012fe6960
-
+ {0x0000000012fe6960, 0x000000012f925a18},
/* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */
- .octa 0x00000000dd2e357c00000000dbda2c20
-
+ {0x00000000dbda2c20, 0x00000000dd2e357c},
/* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */
- .octa 0x00000000071c80de000000011122410c
-
+ {0x000000011122410c, 0x00000000071c80de},
/* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */
- .octa 0x000000011513140a00000000977b2070
-
+ {0x00000000977b2070, 0x000000011513140a},
/* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */
- .octa 0x00000001df876e8e000000014050438e
-
+ {0x000000014050438e, 0x00000001df876e8e},
/* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */
- .octa 0x000000015f81d6ce0000000147c840e8
-
+ {0x0000000147c840e8, 0x000000015f81d6ce},
/* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */
- .octa 0x000000019dd94dbe00000001cc7c88ce
-
+ {0x00000001cc7c88ce, 0x000000019dd94dbe},
/* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */
- .octa 0x00000001373d206e00000001476b35a4
-
+ {0x00000001476b35a4, 0x00000001373d206e},
/* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */
- .octa 0x00000000668ccade000000013d52d508
-
+ {0x000000013d52d508, 0x00000000668ccade},
/* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */
- .octa 0x00000001b192d268000000008e4be32e
-
+ {0x000000008e4be32e, 0x00000001b192d268},
/* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */
- .octa 0x00000000e30f3a7800000000024120fe
-
+ {0x00000000024120fe, 0x00000000e30f3a78},
/* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */
- .octa 0x000000010ef1f7bc00000000ddecddb4
-
+ {0x00000000ddecddb4, 0x000000010ef1f7bc},
/* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */
- .octa 0x00000001f5ac738000000000d4d403bc
-
+ {0x00000000d4d403bc, 0x00000001f5ac7380},
/* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */
- .octa 0x000000011822ea7000000001734b89aa
-
+ {0x00000001734b89aa, 0x000000011822ea70},
/* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */
- .octa 0x00000000c3a33848000000010e7a58d6
-
+ {0x000000010e7a58d6, 0x00000000c3a33848},
/* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */
- .octa 0x00000001bd151c2400000001f9f04e9c
-
+ {0x00000001f9f04e9c, 0x00000001bd151c24},
/* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */
- .octa 0x0000000056002d7600000000b692225e
-
+ {0x00000000b692225e, 0x0000000056002d76},
/* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */
- .octa 0x000000014657c4f4000000019b8d3f3e
-
+ {0x000000019b8d3f3e, 0x000000014657c4f4},
/* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */
- .octa 0x0000000113742d7c00000001a874f11e
-
+ {0x00000001a874f11e, 0x0000000113742d7c},
/* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */
- .octa 0x000000019c5920ba000000010d5a4254
-
+ {0x000000010d5a4254, 0x000000019c5920ba},
/* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */
- .octa 0x000000005216d2d600000000bbb2f5d6
-
+ {0x00000000bbb2f5d6, 0x000000005216d2d6},
/* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */
- .octa 0x0000000136f5ad8a0000000179cc0e36
-
+ {0x0000000179cc0e36, 0x0000000136f5ad8a},
/* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */
- .octa 0x000000018b07beb600000001dca1da4a
-
+ {0x00000001dca1da4a, 0x000000018b07beb6},
/* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */
- .octa 0x00000000db1e93b000000000feb1a192
-
+ {0x00000000feb1a192, 0x00000000db1e93b0},
/* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */
- .octa 0x000000000b96fa3a00000000d1eeedd6
-
+ {0x00000000d1eeedd6, 0x000000000b96fa3a},
/* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */
- .octa 0x00000001d9968af0000000008fad9bb4
-
+ {0x000000008fad9bb4, 0x00000001d9968af0},
/* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */
- .octa 0x000000000e4a77a200000001884938e4
-
+ {0x00000001884938e4, 0x000000000e4a77a2},
/* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */
- .octa 0x00000000508c2ac800000001bc2e9bc0
-
+ {0x00000001bc2e9bc0, 0x00000000508c2ac8},
/* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */
- .octa 0x0000000021572a8000000001f9658a68
-
+ {0x00000001f9658a68, 0x0000000021572a80},
/* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */
- .octa 0x00000001b859daf2000000001b9224fc
-
+ {0x000000001b9224fc, 0x00000001b859daf2},
/* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */
- .octa 0x000000016f7884740000000055b2fb84
-
+ {0x0000000055b2fb84, 0x000000016f788474},
/* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */
- .octa 0x00000001b438810e000000018b090348
-
+ {0x000000018b090348, 0x00000001b438810e},
/* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */
- .octa 0x0000000095ddc6f2000000011ccbd5ea
-
+ {0x000000011ccbd5ea, 0x0000000095ddc6f2},
/* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */
- .octa 0x00000001d977c20c0000000007ae47f8
-
+ {0x0000000007ae47f8, 0x00000001d977c20c},
/* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */
- .octa 0x00000000ebedb99a0000000172acbec0
-
+ {0x0000000172acbec0, 0x00000000ebedb99a},
/* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */
- .octa 0x00000001df9e9e9200000001c6e3ff20
-
+ {0x00000001c6e3ff20, 0x00000001df9e9e92},
/* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */
- .octa 0x00000001a4a3f95200000000e1b38744
-
+ {0x00000000e1b38744, 0x00000001a4a3f952},
/* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */
- .octa 0x00000000e2f5122000000000791585b2
-
+ {0x00000000791585b2, 0x00000000e2f51220},
/* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */
- .octa 0x000000004aa01f3e00000000ac53b894
-
+ {0x00000000ac53b894, 0x000000004aa01f3e},
/* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */
- .octa 0x00000000b3e90a5800000001ed5f2cf4
-
+ {0x00000001ed5f2cf4, 0x00000000b3e90a58},
/* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */
- .octa 0x000000000c9ca2aa00000001df48b2e0
-
+ {0x00000001df48b2e0, 0x000000000c9ca2aa},
/* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */
- .octa 0x000000015168231600000000049c1c62
-
+ {0x00000000049c1c62, 0x0000000151682316},
/* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */
- .octa 0x0000000036fce78c000000017c460c12
-
+ {0x000000017c460c12, 0x0000000036fce78c},
/* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */
- .octa 0x000000009037dc10000000015be4da7e
-
+ {0x000000015be4da7e, 0x000000009037dc10},
/* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */
- .octa 0x00000000d3298582000000010f38f668
-
+ {0x000000010f38f668, 0x00000000d3298582},
/* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */
- .octa 0x00000001b42e8ad60000000039f40a00
-
+ {0x0000000039f40a00, 0x00000001b42e8ad6},
/* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */
- .octa 0x00000000142a983800000000bd4c10c4
-
+ {0x00000000bd4c10c4, 0x00000000142a9838},
/* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */
- .octa 0x0000000109c7f1900000000042db1d98
-
+ {0x0000000042db1d98, 0x0000000109c7f190},
/* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */
- .octa 0x0000000056ff931000000001c905bae6
-
+ {0x00000001c905bae6, 0x0000000056ff9310},
/* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */
- .octa 0x00000001594513aa00000000069d40ea
-
+ {0x00000000069d40ea, 0x00000001594513aa},
/* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */
- .octa 0x00000001e3b5b1e8000000008e4fbad0
-
+ {0x000000008e4fbad0, 0x00000001e3b5b1e8},
/* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */
- .octa 0x000000011dd5fc080000000047bedd46
-
+ {0x0000000047bedd46, 0x000000011dd5fc08},
/* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */
- .octa 0x00000001675f0cc20000000026396bf8
-
+ {0x0000000026396bf8, 0x00000001675f0cc2},
/* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */
- .octa 0x00000000d1c8dd4400000000379beb92
-
+ {0x00000000379beb92, 0x00000000d1c8dd44},
/* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */
- .octa 0x0000000115ebd3d8000000000abae54a
-
+ {0x000000000abae54a, 0x0000000115ebd3d8},
/* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */
- .octa 0x00000001ecbd0dac0000000007e6a128
-
+ {0x0000000007e6a128, 0x00000001ecbd0dac},
/* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */
- .octa 0x00000000cdf67af2000000000ade29d2
-
+ {0x000000000ade29d2, 0x00000000cdf67af2},
/* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */
- .octa 0x000000004c01ff4c00000000f974c45c
-
+ {0x00000000f974c45c, 0x000000004c01ff4c},
/* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */
- .octa 0x00000000f2d8657e00000000e77ac60a
-
+ {0x00000000e77ac60a, 0x00000000f2d8657e},
/* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */
- .octa 0x000000006bae74c40000000145895816
-
+ {0x0000000145895816, 0x000000006bae74c4},
/* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */
- .octa 0x0000000152af8aa00000000038e362be
-
+ {0x0000000038e362be, 0x0000000152af8aa0},
/* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */
- .octa 0x0000000004663802000000007f991a64
-
+ {0x000000007f991a64, 0x0000000004663802},
/* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */
- .octa 0x00000001ab2f5afc00000000fa366d3a
-
+ {0x00000000fa366d3a, 0x00000001ab2f5afc},
/* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */
- .octa 0x0000000074a4ebd400000001a2bb34f0
-
+ {0x00000001a2bb34f0, 0x0000000074a4ebd4},
/* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */
- .octa 0x00000001d7ab3a4c0000000028a9981e
-
+ {0x0000000028a9981e, 0x00000001d7ab3a4c},
/* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */
- .octa 0x00000001a8da60c600000001dbc672be
-
+ {0x00000001dbc672be, 0x00000001a8da60c6},
/* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */
- .octa 0x000000013cf6382000000000b04d77f6
-
+ {0x00000000b04d77f6, 0x000000013cf63820},
/* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */
- .octa 0x00000000bec12e1e0000000124400d96
-
+ {0x0000000124400d96, 0x00000000bec12e1e},
/* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */
- .octa 0x00000001c6368010000000014ca4b414
-
+ {0x000000014ca4b414, 0x00000001c6368010},
/* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */
- .octa 0x00000001e6e78758000000012fe2c938
-
+ {0x000000012fe2c938, 0x00000001e6e78758},
/* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */
- .octa 0x000000008d7f2b3c00000001faed01e6
-
+ {0x00000001faed01e6, 0x000000008d7f2b3c},
/* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */
- .octa 0x000000016b4a156e000000007e80ecfe
-
+ {0x000000007e80ecfe, 0x000000016b4a156e},
/* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */
- .octa 0x00000001c63cfeb60000000098daee94
-
+ {0x0000000098daee94, 0x00000001c63cfeb6},
/* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */
- .octa 0x000000015f902670000000010a04edea
-
+ {0x000000010a04edea, 0x000000015f902670},
/* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */
- .octa 0x00000001cd5de11e00000001c00b4524
-
+ {0x00000001c00b4524, 0x00000001cd5de11e},
/* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */
- .octa 0x000000001acaec540000000170296550
-
+ {0x0000000170296550, 0x000000001acaec54},
/* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */
- .octa 0x000000002bd0ca780000000181afaa48
-
+ {0x0000000181afaa48, 0x000000002bd0ca78},
/* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */
- .octa 0x0000000032d63d5c0000000185a31ffa
-
+ {0x0000000185a31ffa, 0x0000000032d63d5c},
/* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */
- .octa 0x000000001c6d4e4c000000002469f608
-
+ {0x000000002469f608, 0x000000001c6d4e4c},
/* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */
- .octa 0x0000000106a60b92000000006980102a
-
+ {0x000000006980102a, 0x0000000106a60b92},
/* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */
- .octa 0x00000000d3855e120000000111ea9ca8
-
+ {0x0000000111ea9ca8, 0x00000000d3855e12},
/* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */
- .octa 0x00000000e312563600000001bd1d29ce
-
+ {0x00000001bd1d29ce, 0x00000000e3125636},
/* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */
- .octa 0x000000009e8f7ea400000001b34b9580
-
+ {0x00000001b34b9580, 0x000000009e8f7ea4},
/* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */
- .octa 0x00000001c82e562c000000003076054e
-
+ {0x000000003076054e, 0x00000001c82e562c},
/* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */
- .octa 0x00000000ca9f09ce000000012a608ea4
-
+ {0x000000012a608ea4, 0x00000000ca9f09ce},
/* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */
- .octa 0x00000000c63764e600000000784d05fe
-
+ {0x00000000784d05fe, 0x00000000c63764e6},
/* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */
- .octa 0x0000000168d2e49e000000016ef0d82a
-
+ {0x000000016ef0d82a, 0x0000000168d2e49e},
/* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */
- .octa 0x00000000e986c1480000000075bda454
-
+ {0x0000000075bda454, 0x00000000e986c148},
/* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */
- .octa 0x00000000cfb65894000000003dc0a1c4
-
+ {0x000000003dc0a1c4, 0x00000000cfb65894},
/* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */
- .octa 0x0000000111cadee400000000e9a5d8be
-
+ {0x00000000e9a5d8be, 0x0000000111cadee4},
/* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */
- .octa 0x0000000171fb63ce00000001609bc4b4
-
- .short_constants :
-
- /* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include
- the trailing 32 bits of zeros */
- /* x^1952 mod p(x)`, x^1984 mod p(x)`, x^2016 mod p(x)`, x^2048 mod
- p(x)` */
- .octa 0x7fec2963e5bf80485cf015c388e56f72
-
- /* x^1824 mod p(x)`, x^1856 mod p(x)`, x^1888 mod p(x)`, x^1920 mod
- p(x)` */
- .octa 0x38e888d4844752a9963a18920246e2e6
-
- /* x^1696 mod p(x)`, x^1728 mod p(x)`, x^1760 mod p(x)`, x^1792 mod
- p(x)` */
- .octa 0x42316c00730206ad419a441956993a31
-
- /* x^1568 mod p(x)`, x^1600 mod p(x)`, x^1632 mod p(x)`, x^1664 mod
- p(x)` */
- .octa 0x543d5c543e65ddf9924752ba2b830011
-
- /* x^1440 mod p(x)`, x^1472 mod p(x)`, x^1504 mod p(x)`, x^1536 mod
- p(x)` */
- .octa 0x78e87aaf56767c9255bd7f9518e4a304
-
- /* x^1312 mod p(x)`, x^1344 mod p(x)`, x^1376 mod p(x)`, x^1408 mod
- p(x)` */
- .octa 0x8f68fcec1903da7f6d76739fe0553f1e
-
- /* x^1184 mod p(x)`, x^1216 mod p(x)`, x^1248 mod p(x)`, x^1280 mod
- p(x)` */
- .octa 0x3f4840246791d588c133722b1fe0b5c3
-
- /* x^1056 mod p(x)`, x^1088 mod p(x)`, x^1120 mod p(x)`, x^1152 mod
- p(x)` */
- .octa 0x34c96751b04de25a64b67ee0e55ef1f3
-
- /* x^928 mod p(x)`, x^960 mod p(x)`, x^992 mod p(x)`, x^1024 mod p(x)`
- */
- .octa 0x156c8e180b4a395b069db049b8fdb1e7
-
- /* x^800 mod p(x)`, x^832 mod p(x)`, x^864 mod p(x)`, x^896 mod p(x)` */
- .octa 0xe0b99ccbe661f7bea11bfaf3c9e90b9e
-
- /* x^672 mod p(x)`, x^704 mod p(x)`, x^736 mod p(x)`, x^768 mod p(x)` */
- .octa 0x041d37768cd75659817cdc5119b29a35
-
- /* x^544 mod p(x)`, x^576 mod p(x)`, x^608 mod p(x)`, x^640 mod p(x)` */
- .octa 0x3a0777818cfaa9651ce9d94b36c41f1c
-
- /* x^416 mod p(x)`, x^448 mod p(x)`, x^480 mod p(x)`, x^512 mod p(x)` */
- .octa 0x0e148e8252377a554f256efcb82be955
-
- /* x^288 mod p(x)`, x^320 mod p(x)`, x^352 mod p(x)`, x^384 mod p(x)` */
- .octa 0x9c25531d19e65ddeec1631edb2dea967
-
- /* x^160 mod p(x)`, x^192 mod p(x)`, x^224 mod p(x)`, x^256 mod p(x)` */
- .octa 0x790606ff9957c0a65d27e147510ac59a
+ {0x00000001609bc4b4, 0x0000000171fb63ce}
+#else /* __LITTLE_ENDIAN__ */
+ /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */
+ {0x00000000b6ca9e20, 0x000000009c37c408},
+ /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */
+ {0x00000000350249a8, 0x00000001b51df26c},
+ /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */
+ {0x00000001862dac54, 0x000000000724b9d0},
+ /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */
+ {0x00000001d87fb48c, 0x00000001c00532fe},
+ /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */
+ {0x00000001f39b699e, 0x00000000f05a9362},
+ /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */
+ {0x0000000101da11b4, 0x00000001e1007970},
+ /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */
+ {0x00000001cab571e0, 0x00000000a57366ee},
+ /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */
+ {0x00000000c7020cfe, 0x0000000192011284},
+ /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */
+ {0x00000000cdaed1ae, 0x0000000162716d9a},
+ /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */
+ {0x00000001e804effc, 0x00000000cd97ecde},
+ /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */
+ {0x0000000077c3ea3a, 0x0000000058812bc0},
+ /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */
+ {0x0000000068df31b4, 0x0000000088b8c12e},
+ /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */
+ {0x00000000b059b6c2, 0x00000001230b234c},
+ /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */
+ {0x0000000145fb8ed8, 0x00000001120b416e},
+ /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */
+ {0x00000000cbc09168, 0x00000001974aecb0},
+ /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */
+ {0x000000005ceeedc2, 0x000000008ee3f226},
+ /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */
+ {0x0000000047d74e86, 0x00000001089aba9a},
+ /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */
+ {0x00000001407e9e22, 0x0000000065113872},
+ /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */
+ {0x00000001da967bda, 0x000000005c07ec10},
+ /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */
+ {0x000000006c898368, 0x0000000187590924},
+ /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */
+ {0x00000000f2d14c98, 0x00000000e35da7c6},
+ /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */
+ {0x00000001993c6ad4, 0x000000000415855a},
+ /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */
+ {0x000000014683d1ac, 0x0000000073617758},
+ /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */
+ {0x00000001a7c93e6c, 0x0000000176021d28},
+ /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */
+ {0x000000010211e90a, 0x00000001c358fd0a},
+ /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */
+ {0x000000001119403e, 0x00000001ff7a2c18},
+ /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */
+ {0x000000001c3261aa, 0x00000000f2d9f7e4},
+ /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */
+ {0x000000014e37a634, 0x000000016cf1f9c8},
+ /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */
+ {0x0000000073786c0c, 0x000000010af9279a},
+ /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */
+ {0x000000011dc037f8, 0x0000000004f101e8},
+ /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */
+ {0x0000000031433dfc, 0x0000000070bcf184},
+ /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */
+ {0x000000009cde8348, 0x000000000a8de642},
+ /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */
+ {0x0000000038d3c2a6, 0x0000000062ea130c},
+ /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */
+ {0x000000011b25f260, 0x00000001eb31cbb2},
+ /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */
+ {0x000000001629e6f0, 0x0000000170783448},
+ /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */
+ {0x0000000160838b4c, 0x00000001a684b4c6},
+ /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */
+ {0x000000007a44011c, 0x00000000253ca5b4},
+ /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */
+ {0x00000000226f417a, 0x0000000057b4b1e2},
+ /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */
+ {0x0000000045eb2eb4, 0x00000000b6bd084c},
+ /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */
+ {0x000000014459d70c, 0x0000000123c2d592},
+ /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */
+ {0x00000001d406ed82, 0x00000000159dafce},
+ /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */
+ {0x0000000160c8e1a8, 0x0000000127e1a64e},
+ /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */
+ {0x0000000027ba8098, 0x0000000056860754},
+ /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */
+ {0x000000006d92d018, 0x00000001e661aae8},
+ /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */
+ {0x000000012ed7e3f2, 0x00000000f82c6166},
+ /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */
+ {0x000000002dc87788, 0x00000000c4f9c7ae},
+ /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */
+ {0x0000000018240bb8, 0x0000000074203d20},
+ /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */
+ {0x000000001ad38158, 0x0000000198173052},
+ /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */
+ {0x00000001396b78f2, 0x00000001ce8aba54},
+ /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */
+ {0x000000011a681334, 0x00000001850d5d94},
+ /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */
+ {0x000000012104732e, 0x00000001d609239c},
+ /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */
+ {0x00000000a140d90c, 0x000000001595f048},
+ /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */
+ {0x00000001b7215eda, 0x0000000042ccee08},
+ /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */
+ {0x00000001aaf1df3c, 0x000000010a389d74},
+ /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */
+ {0x0000000029d15b8a, 0x000000012a840da6},
+ /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */
+ {0x00000000f1a96922, 0x000000001d181c0c},
+ /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */
+ {0x00000001ac80d03c, 0x0000000068b7d1f6},
+ /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */
+ {0x000000000f11d56a, 0x000000005b0f14fc},
+ /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */
+ {0x00000001f1c022a2, 0x0000000179e9e730},
+ /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */
+ {0x0000000173d00ae2, 0x00000001ce1368d6},
+ /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */
+ {0x00000001d4ffe4ac, 0x0000000112c3a84c},
+ /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */
+ {0x000000016edc5ae4, 0x00000000de940fee},
+ /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */
+ {0x00000001f1a02140, 0x00000000fe896b7e},
+ /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */
+ {0x00000000ca0b28a0, 0x00000001f797431c},
+ /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */
+ {0x00000001928e30a2, 0x0000000053e989ba},
+ /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */
+ {0x0000000097b1b002, 0x000000003920cd16},
+ /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */
+ {0x00000000b15bf906, 0x00000001e6f579b8},
+ /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */
+ {0x00000000411c5d52, 0x000000007493cb0a},
+ /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */
+ {0x00000001c36f3300, 0x00000001bdd376d8},
+ /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */
+ {0x00000001119227e0, 0x000000016badfee6},
+ /* x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1 */
+ {0x00000000114d4702, 0x0000000071de5c58},
+ /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */
+ {0x00000000458b5b98, 0x00000000453f317c},
+ /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */
+ {0x000000012e31fb8e, 0x0000000121675cce},
+ /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */
+ {0x000000005cf619d8, 0x00000001f409ee92},
+ /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */
+ {0x0000000063f4d8b2, 0x00000000f36b9c88},
+ /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */
+ {0x000000004138dc8a, 0x0000000036b398f4},
+ /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */
+ {0x00000001d29ee8e0, 0x00000001748f9adc},
+ /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */
+ {0x000000006a08ace8, 0x00000001be94ec00},
+ /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */
+ {0x0000000127d42010, 0x00000000b74370d6},
+ /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */
+ {0x0000000019d76b62, 0x00000001174d0b98},
+ /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */
+ {0x00000001b1471f6e, 0x00000000befc06a4},
+ /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */
+ {0x00000001f64c19cc, 0x00000001ae125288},
+ /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */
+ {0x00000000003c0ea0, 0x0000000095c19b34},
+ /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */
+ {0x000000014d73abf6, 0x00000001a78496f2},
+ /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */
+ {0x00000001620eb844, 0x00000001ac5390a0},
+ /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */
+ {0x0000000147655048, 0x000000002a80ed6e},
+ /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */
+ {0x0000000067b5077e, 0x00000001fa9b0128},
+ /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */
+ {0x0000000010ffe206, 0x00000001ea94929e},
+ /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */
+ {0x000000000fee8f1e, 0x0000000125f4305c},
+ /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */
+ {0x00000001da26fbae, 0x00000001471e2002},
+ /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */
+ {0x00000001b3a8bd88, 0x0000000132d2253a},
+ /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */
+ {0x00000000e8f3898e, 0x00000000f26b3592},
+ /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */
+ {0x00000000b0d0d28c, 0x00000000bc8b67b0},
+ /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */
+ {0x0000000030f2a798, 0x000000013a826ef2},
+ /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */
+ {0x000000000fba1002, 0x0000000081482c84},
+ /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */
+ {0x00000000bdb9bd72, 0x00000000e77307c2},
+ /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */
+ {0x0000000075d3bf5a, 0x00000000d4a07ec8},
+ /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */
+ {0x00000000ef1f98a0, 0x0000000017102100},
+ /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */
+ {0x00000000689c7602, 0x00000000db406486},
+ /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */
+ {0x000000016d5fa5fe, 0x0000000192db7f88},
+ /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */
+ {0x00000001d0d2b9ca, 0x000000018bf67b1e},
+ /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */
+ {0x0000000041e7b470, 0x000000007c09163e},
+ /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */
+ {0x00000001cbb6495e, 0x000000000adac060},
+ /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */
+ {0x000000010052a0b0, 0x00000000bd8316ae},
+ /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */
+ {0x00000001d8effb5c, 0x000000019f09ab54},
+ /* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */
+ {0x00000001d969853c, 0x0000000125155542},
+ /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */
+ {0x00000000523ccce2, 0x000000018fdb5882},
+ /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */
+ {0x000000001e2436bc, 0x00000000e794b3f4},
+ /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */
+ {0x00000000ddd1c3a2, 0x000000016f9bb022},
+ /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */
+ {0x0000000019fcfe38, 0x00000000290c9978},
+ /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */
+ {0x00000001ce95db64, 0x0000000083c0f350},
+ /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */
+ {0x00000000af582806, 0x0000000173ea6628},
+ /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */
+ {0x00000001006388f6, 0x00000001c8b4e00a},
+ /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */
+ {0x0000000179eca00a, 0x00000000de95d6aa},
+ /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */
+ {0x0000000122410a6a, 0x000000010b7f7248},
+ /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */
+ {0x000000004288e87c, 0x00000001326e3a06},
+ /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */
+ {0x000000016c5490da, 0x00000000bb62c2e6},
+ /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */
+ {0x00000000d1c71f6e, 0x0000000156a4b2c2},
+ /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */
+ {0x00000001b4ce08a6, 0x000000011dfe763a},
+ /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */
+ {0x00000001466ba60c, 0x000000007bcca8e2},
+ /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */
+ {0x00000001f6c488a4, 0x0000000186118faa},
+ /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */
+ {0x000000013bfb0682, 0x0000000111a65a88},
+ /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */
+ {0x00000000690e9e54, 0x000000003565e1c4},
+ /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */
+ {0x00000000281346b6, 0x000000012ed02a82},
+ /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */
+ {0x0000000156464024, 0x00000000c486ecfc},
+ /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */
+ {0x000000016063a8dc, 0x0000000001b951b2},
+ /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */
+ {0x0000000116a66362, 0x0000000048143916},
+ /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */
+ {0x000000017e8aa4d2, 0x00000001dc2ae124},
+ /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */
+ {0x00000001728eb10c, 0x00000001416c58d6},
+ /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */
+ {0x00000001b08fd7fa, 0x00000000a479744a},
+ /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */
+ {0x00000001092a16e8, 0x0000000096ca3a26},
+ /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */
+ {0x00000000a505637c, 0x00000000ff223d4e},
+ /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */
+ {0x00000000d94869b2, 0x000000010e84da42},
+ /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */
+ {0x00000001c8b203ae, 0x00000001b61ba3d0},
+ /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */
+ {0x000000005704aea0, 0x00000000680f2de8},
+ /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */
+ {0x000000012e295fa2, 0x000000008772a9a8},
+ /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */
+ {0x000000011d0908bc, 0x0000000155f295bc},
+ /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */
+ {0x0000000193ed97ea, 0x00000000595f9282},
+ /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */
+ {0x000000013a0f1c52, 0x0000000164b1c25a},
+ /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */
+ {0x000000010c2c40c0, 0x00000000fbd67c50},
+ /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */
+ {0x00000000ff6fac3e, 0x0000000096076268},
+ /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */
+ {0x000000017b3609c0, 0x00000001d288e4cc},
+ /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */
+ {0x0000000088c8c922, 0x00000001eaac1bdc},
+ /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */
+ {0x00000001751baae6, 0x00000001f1ea39e2},
+ /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */
+ {0x0000000107952972, 0x00000001eb6506fc},
+ /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */
+ {0x0000000162b00abe, 0x000000010f806ffe},
+ /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */
+ {0x000000000d7b404c, 0x000000010408481e},
+ /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */
+ {0x00000000763b13d4, 0x0000000188260534},
+ /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */
+ {0x00000000f6dc22d8, 0x0000000058fc73e0},
+ /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */
+ {0x000000007daae060, 0x00000000391c59b8},
+ /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */
+ {0x000000013359ab7c, 0x000000018b638400},
+ /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */
+ {0x000000008add438a, 0x000000011738f5c4},
+ /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */
+ {0x00000001edbefdea, 0x000000008cf7c6da},
+ /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */
+ {0x000000004104e0f8, 0x00000001ef97fb16},
+ /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */
+ {0x00000000b48a8222, 0x0000000102130e20},
+ /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */
+ {0x00000001bcb46844, 0x00000000db968898},
+ /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */
+ {0x000000013293ce0a, 0x00000000b5047b5e},
+ /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */
+ {0x00000001710d0844, 0x000000010b90fdb2},
+ /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */
+ {0x0000000117907f6e, 0x000000004834a32e},
+ /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */
+ {0x0000000087ddf93e, 0x0000000059c8f2b0},
+ /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */
+ {0x000000005970e9b0, 0x0000000122cec508},
+ /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */
+ {0x0000000185b2b7d0, 0x000000000a330cda},
+ /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */
+ {0x00000001dcee0efc, 0x000000014a47148c},
+ /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */
+ {0x0000000030da2722, 0x0000000042c61cb8},
+ /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */
+ {0x000000012f925a18, 0x0000000012fe6960},
+ /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */
+ {0x00000000dd2e357c, 0x00000000dbda2c20},
+ /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */
+ {0x00000000071c80de, 0x000000011122410c},
+ /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */
+ {0x000000011513140a, 0x00000000977b2070},
+ /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */
+ {0x00000001df876e8e, 0x000000014050438e},
+ /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */
+ {0x000000015f81d6ce, 0x0000000147c840e8},
+ /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */
+ {0x000000019dd94dbe, 0x00000001cc7c88ce},
+ /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */
+ {0x00000001373d206e, 0x00000001476b35a4},
+ /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */
+ {0x00000000668ccade, 0x000000013d52d508},
+ /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */
+ {0x00000001b192d268, 0x000000008e4be32e},
+ /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */
+ {0x00000000e30f3a78, 0x00000000024120fe},
+ /* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */
+ {0x000000010ef1f7bc, 0x00000000ddecddb4},
+ /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */
+ {0x00000001f5ac7380, 0x00000000d4d403bc},
+ /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */
+ {0x000000011822ea70, 0x00000001734b89aa},
+ /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */
+ {0x00000000c3a33848, 0x000000010e7a58d6},
+ /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */
+ {0x00000001bd151c24, 0x00000001f9f04e9c},
+ /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */
+ {0x0000000056002d76, 0x00000000b692225e},
+ /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */
+ {0x000000014657c4f4, 0x000000019b8d3f3e},
+ /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */
+ {0x0000000113742d7c, 0x00000001a874f11e},
+ /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */
+ {0x000000019c5920ba, 0x000000010d5a4254},
+ /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */
+ {0x000000005216d2d6, 0x00000000bbb2f5d6},
+ /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */
+ {0x0000000136f5ad8a, 0x0000000179cc0e36},
+ /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */
+ {0x000000018b07beb6, 0x00000001dca1da4a},
+ /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */
+ {0x00000000db1e93b0, 0x00000000feb1a192},
+ /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */
+ {0x000000000b96fa3a, 0x00000000d1eeedd6},
+ /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */
+ {0x00000001d9968af0, 0x000000008fad9bb4},
+ /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */
+ {0x000000000e4a77a2, 0x00000001884938e4},
+ /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */
+ {0x00000000508c2ac8, 0x00000001bc2e9bc0},
+ /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */
+ {0x0000000021572a80, 0x00000001f9658a68},
+ /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */
+ {0x00000001b859daf2, 0x000000001b9224fc},
+ /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */
+ {0x000000016f788474, 0x0000000055b2fb84},
+ /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */
+ {0x00000001b438810e, 0x000000018b090348},
+ /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */
+ {0x0000000095ddc6f2, 0x000000011ccbd5ea},
+ /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */
+ {0x00000001d977c20c, 0x0000000007ae47f8},
+ /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */
+ {0x00000000ebedb99a, 0x0000000172acbec0},
+ /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */
+ {0x00000001df9e9e92, 0x00000001c6e3ff20},
+ /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */
+ {0x00000001a4a3f952, 0x00000000e1b38744},
+ /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */
+ {0x00000000e2f51220, 0x00000000791585b2},
+ /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */
+ {0x000000004aa01f3e, 0x00000000ac53b894},
+ /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */
+ {0x00000000b3e90a58, 0x00000001ed5f2cf4},
+ /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */
+ {0x000000000c9ca2aa, 0x00000001df48b2e0},
+ /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */
+ {0x0000000151682316, 0x00000000049c1c62},
+ /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */
+ {0x0000000036fce78c, 0x000000017c460c12},
+ /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */
+ {0x000000009037dc10, 0x000000015be4da7e},
+ /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */
+ {0x00000000d3298582, 0x000000010f38f668},
+ /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */
+ {0x00000001b42e8ad6, 0x0000000039f40a00},
+ /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */
+ {0x00000000142a9838, 0x00000000bd4c10c4},
+ /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */
+ {0x0000000109c7f190, 0x0000000042db1d98},
+ /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */
+ {0x0000000056ff9310, 0x00000001c905bae6},
+ /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */
+ {0x00000001594513aa, 0x00000000069d40ea},
+ /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */
+ {0x00000001e3b5b1e8, 0x000000008e4fbad0},
+ /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */
+ {0x000000011dd5fc08, 0x0000000047bedd46},
+ /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */
+ {0x00000001675f0cc2, 0x0000000026396bf8},
+ /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */
+ {0x00000000d1c8dd44, 0x00000000379beb92},
+ /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */
+ {0x0000000115ebd3d8, 0x000000000abae54a},
+ /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */
+ {0x00000001ecbd0dac, 0x0000000007e6a128},
+ /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */
+ {0x00000000cdf67af2, 0x000000000ade29d2},
+ /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */
+ {0x000000004c01ff4c, 0x00000000f974c45c},
+ /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */
+ {0x00000000f2d8657e, 0x00000000e77ac60a},
+ /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */
+ {0x000000006bae74c4, 0x0000000145895816},
+ /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */
+ {0x0000000152af8aa0, 0x0000000038e362be},
+ /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */
+ {0x0000000004663802, 0x000000007f991a64},
+ /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */
+ {0x00000001ab2f5afc, 0x00000000fa366d3a},
+ /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */
+ {0x0000000074a4ebd4, 0x00000001a2bb34f0},
+ /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */
+ {0x00000001d7ab3a4c, 0x0000000028a9981e},
+ /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */
+ {0x00000001a8da60c6, 0x00000001dbc672be},
+ /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */
+ {0x000000013cf63820, 0x00000000b04d77f6},
+ /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */
+ {0x00000000bec12e1e, 0x0000000124400d96},
+ /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */
+ {0x00000001c6368010, 0x000000014ca4b414},
+ /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */
+ {0x00000001e6e78758, 0x000000012fe2c938},
+ /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */
+ {0x000000008d7f2b3c, 0x00000001faed01e6},
+ /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */
+ {0x000000016b4a156e, 0x000000007e80ecfe},
+ /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */
+ {0x00000001c63cfeb6, 0x0000000098daee94},
+ /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */
+ {0x000000015f902670, 0x000000010a04edea},
+ /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */
+ {0x00000001cd5de11e, 0x00000001c00b4524},
+ /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */
+ {0x000000001acaec54, 0x0000000170296550},
+ /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */
+ {0x000000002bd0ca78, 0x0000000181afaa48},
+ /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */
+ {0x0000000032d63d5c, 0x0000000185a31ffa},
+ /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */
+ {0x000000001c6d4e4c, 0x000000002469f608},
+ /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */
+ {0x0000000106a60b92, 0x000000006980102a},
+ /* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */
+ {0x00000000d3855e12, 0x0000000111ea9ca8},
+ /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */
+ {0x00000000e3125636, 0x00000001bd1d29ce},
+ /* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */
+ {0x000000009e8f7ea4, 0x00000001b34b9580},
+ /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */
+ {0x00000001c82e562c, 0x000000003076054e},
+ /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */
+ {0x00000000ca9f09ce, 0x000000012a608ea4},
+ /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */
+ {0x00000000c63764e6, 0x00000000784d05fe},
+ /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */
+ {0x0000000168d2e49e, 0x000000016ef0d82a},
+ /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */
+ {0x00000000e986c148, 0x0000000075bda454},
+ /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */
+ {0x00000000cfb65894, 0x000000003dc0a1c4},
+ /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */
+ {0x0000000111cadee4, 0x00000000e9a5d8be},
+ /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */
+ {0x0000000171fb63ce, 0x00000001609bc4b4}
+#endif /* __LITTLE_ENDIAN__ */
+};
- /* x^32 mod p(x)`, x^64 mod p(x)`, x^96 mod p(x)`, x^128 mod p(x)` */
- .octa 0x82f63b786ea2d55ca66805eb18b8ea18
+/* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the
+ * trailing 32 bits of zeros */
+
+static const __vector unsigned long long vcrc_short_const[16] __attribute__((
+ aligned(16))) = {
+#ifdef __LITTLE_ENDIAN__
+ /* x^1952 mod p(x) , x^1984 mod p(x) , x^2016 mod p(x) , x^2048 mod p(x) */
+ {0x5cf015c388e56f72, 0x7fec2963e5bf8048},
+ /* x^1824 mod p(x) , x^1856 mod p(x) , x^1888 mod p(x) , x^1920 mod p(x) */
+ {0x963a18920246e2e6, 0x38e888d4844752a9},
+ /* x^1696 mod p(x) , x^1728 mod p(x) , x^1760 mod p(x) , x^1792 mod p(x) */
+ {0x419a441956993a31, 0x42316c00730206ad},
+ /* x^1568 mod p(x) , x^1600 mod p(x) , x^1632 mod p(x) , x^1664 mod p(x) */
+ {0x924752ba2b830011, 0x543d5c543e65ddf9},
+ /* x^1440 mod p(x) , x^1472 mod p(x) , x^1504 mod p(x) , x^1536 mod p(x) */
+ {0x55bd7f9518e4a304, 0x78e87aaf56767c92},
+ /* x^1312 mod p(x) , x^1344 mod p(x) , x^1376 mod p(x) , x^1408 mod p(x) */
+ {0x6d76739fe0553f1e, 0x8f68fcec1903da7f},
+ /* x^1184 mod p(x) , x^1216 mod p(x) , x^1248 mod p(x) , x^1280 mod p(x) */
+ {0xc133722b1fe0b5c3, 0x3f4840246791d588},
+ /* x^1056 mod p(x) , x^1088 mod p(x) , x^1120 mod p(x) , x^1152 mod p(x) */
+ {0x64b67ee0e55ef1f3, 0x34c96751b04de25a},
+ /* x^928 mod p(x) , x^960 mod p(x) , x^992 mod p(x) , x^1024 mod p(x) */
+ {0x069db049b8fdb1e7, 0x156c8e180b4a395b},
+ /* x^800 mod p(x) , x^832 mod p(x) , x^864 mod p(x) , x^896 mod p(x) */
+ {0xa11bfaf3c9e90b9e, 0xe0b99ccbe661f7be},
+ /* x^672 mod p(x) , x^704 mod p(x) , x^736 mod p(x) , x^768 mod p(x) */
+ {0x817cdc5119b29a35, 0x041d37768cd75659},
+ /* x^544 mod p(x) , x^576 mod p(x) , x^608 mod p(x) , x^640 mod p(x) */
+ {0x1ce9d94b36c41f1c, 0x3a0777818cfaa965},
+ /* x^416 mod p(x) , x^448 mod p(x) , x^480 mod p(x) , x^512 mod p(x) */
+ {0x4f256efcb82be955, 0x0e148e8252377a55},
+ /* x^288 mod p(x) , x^320 mod p(x) , x^352 mod p(x) , x^384 mod p(x) */
+ {0xec1631edb2dea967, 0x9c25531d19e65dde},
+ /* x^160 mod p(x) , x^192 mod p(x) , x^224 mod p(x) , x^256 mod p(x) */
+ {0x5d27e147510ac59a, 0x790606ff9957c0a6},
+ /* x^32 mod p(x) , x^64 mod p(x) , x^96 mod p(x) , x^128 mod p(x) */
+ {0xa66805eb18b8ea18, 0x82f63b786ea2d55c}
+#else /* __LITTLE_ENDIAN__ */
+ /* x^1952 mod p(x) , x^1984 mod p(x) , x^2016 mod p(x) , x^2048 mod p(x) */
+ {0x7fec2963e5bf8048, 0x5cf015c388e56f72},
+ /* x^1824 mod p(x) , x^1856 mod p(x) , x^1888 mod p(x) , x^1920 mod p(x) */
+ {0x38e888d4844752a9, 0x963a18920246e2e6},
+ /* x^1696 mod p(x) , x^1728 mod p(x) , x^1760 mod p(x) , x^1792 mod p(x) */
+ {0x42316c00730206ad, 0x419a441956993a31},
+ /* x^1568 mod p(x) , x^1600 mod p(x) , x^1632 mod p(x) , x^1664 mod p(x) */
+ {0x543d5c543e65ddf9, 0x924752ba2b830011},
+ /* x^1440 mod p(x) , x^1472 mod p(x) , x^1504 mod p(x) , x^1536 mod p(x) */
+ {0x78e87aaf56767c92, 0x55bd7f9518e4a304},
+ /* x^1312 mod p(x) , x^1344 mod p(x) , x^1376 mod p(x) , x^1408 mod p(x) */
+ {0x8f68fcec1903da7f, 0x6d76739fe0553f1e},
+ /* x^1184 mod p(x) , x^1216 mod p(x) , x^1248 mod p(x) , x^1280 mod p(x) */
+ {0x3f4840246791d588, 0xc133722b1fe0b5c3},
+ /* x^1056 mod p(x) , x^1088 mod p(x) , x^1120 mod p(x) , x^1152 mod p(x) */
+ {0x34c96751b04de25a, 0x64b67ee0e55ef1f3},
+ /* x^928 mod p(x) , x^960 mod p(x) , x^992 mod p(x) , x^1024 mod p(x) */
+ {0x156c8e180b4a395b, 0x069db049b8fdb1e7},
+ /* x^800 mod p(x) , x^832 mod p(x) , x^864 mod p(x) , x^896 mod p(x) */
+ {0xe0b99ccbe661f7be, 0xa11bfaf3c9e90b9e},
+ /* x^672 mod p(x) , x^704 mod p(x) , x^736 mod p(x) , x^768 mod p(x) */
+ {0x041d37768cd75659, 0x817cdc5119b29a35},
+ /* x^544 mod p(x) , x^576 mod p(x) , x^608 mod p(x) , x^640 mod p(x) */
+ {0x3a0777818cfaa965, 0x1ce9d94b36c41f1c},
+ /* x^416 mod p(x) , x^448 mod p(x) , x^480 mod p(x) , x^512 mod p(x) */
+ {0x0e148e8252377a55, 0x4f256efcb82be955},
+ /* x^288 mod p(x) , x^320 mod p(x) , x^352 mod p(x) , x^384 mod p(x) */
+ {0x9c25531d19e65dde, 0xec1631edb2dea967},
+ /* x^160 mod p(x) , x^192 mod p(x) , x^224 mod p(x) , x^256 mod p(x) */
+ {0x790606ff9957c0a6, 0x5d27e147510ac59a},
+ /* x^32 mod p(x) , x^64 mod p(x) , x^96 mod p(x) , x^128 mod p(x) */
+ {0x82f63b786ea2d55c, 0xa66805eb18b8ea18}
+#endif /* __LITTLE_ENDIAN__ */
+};
- .barrett_constants :
- /* 33 bit reflected Barrett constant m - (4^32)/n */
- .octa 0x000000000000000000000000dea713f1 /* x^64 div p(x)` */
- /* 33 bit reflected Barrett constant n */
- .octa 0x00000000000000000000000105ec76f1
-#endif
+/* Barrett constants */
+/* 33 bit reflected Barrett constant m - (4^32)/n */
+
+static const __vector unsigned long long v_Barrett_const[2]
+ __attribute__((aligned(16))) = {
+/* x^64 div p(x) */
+#ifdef __LITTLE_ENDIAN__
+ {0x00000000dea713f1, 0x0000000000000000},
+ {0x0000000105ec76f1, 0x0000000000000000}
+#else /* __LITTLE_ENDIAN__ */
+ {0x0000000000000000, 0x00000000dea713f1},
+ {0x0000000000000000, 0x0000000105ec76f1}
+#endif /* __LITTLE_ENDIAN__ */
+};
+#endif /* POWER8_INTRINSICS */
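
The tables above are generated data: each entry is a remainder of the form x^N mod p(x) for the CRC32C (Castagnoli) polynomial, stored in reflected form and shifted left by one, as the ` and << 1 in the comments indicate (the reflected polynomial 0x82f63b78 is recognizable in the final short-constant entry). For readers who want to sanity-check an entry, the following standalone sketch — not part of the patch; the helper name xpow_mod_p is made up for illustration — computes the underlying remainders in plain, non-reflected bit order.

#include <stdint.h>
#include <stdio.h>

/* x^k mod p(x) for CRC32C in plain (non-reflected) bit order: bit i of the
 * result is the coefficient of x^i.  0x1EDC6F41 is p(x) with the x^32 term
 * dropped; 0x82F63B78 is its bit-reflection, seen in the tables above. */
static uint32_t xpow_mod_p(uint64_t k) {
  const uint32_t poly = 0x1EDC6F41u;
  uint32_t r = 1u; /* start from x^0 */
  while (k--) {
    uint32_t carry = r & 0x80000000u;
    r <<= 1;            /* multiply by x */
    if (carry)
      r ^= poly;        /* reduce once the degree reaches 32 */
  }
  return r;
}

int main(void) {
  /* sanity check: x^32 mod p(x) is just the low 32 bits of the polynomial */
  printf("x^32   mod p(x) = 0x%08x\n", xpow_mod_p(32));   /* 0x1edc6f41 */
  printf("x^1024 mod p(x) = 0x%08x\n", xpow_mod_p(1024));
  printf("x^2048 mod p(x) = 0x%08x\n", xpow_mod_p(2048));
  return 0;
}

The table entries apply the reflection and the << 1 on top of these remainders, so the printed values are the pre-reflection form only. The two v_Barrett_const entries play a different role: per the comments, m is x^64 div p(x) and n is the 33-bit reflected polynomial, used in the final Barrett step that reduces the folded 64-bit remainder to the 32-bit CRC.
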
diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc
index 3e4f7396e7..546c6a4401 100644
--- a/util/crc32c_test.cc
+++ b/util/crc32c_test.cc
@@ -108,6 +108,9 @@ TEST(CRC, StandardResults) {
EXPECT_EQ(~expected.crc32c, result);
}
+ // NULL buffer
+ EXPECT_EQ((uint32_t)0, Value(NULL, 0));
+
// Test 2: stitching two computations
for (auto expected : expectedResults) {
size_t partialLength = expected.length / 2;