Commit d7e12014 authored by Ken Raeburn, committed by Mike Snitzer

dm vdo murmurhash3: use kernel byteswapping routines instead of GCC ones

Also open-code the calls.
Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Ken Raeburn <raeburn@redhat.com>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 309d8ced
--- a/drivers/md/dm-vdo/murmurhash3.c
+++ b/drivers/md/dm-vdo/murmurhash3.c
@@ -8,33 +8,14 @@
 
 #include "murmurhash3.h"
 
+#include <asm/unaligned.h>
+
 static inline u64 rotl64(u64 x, s8 r)
 {
 	return (x << r) | (x >> (64 - r));
 }
 
 #define ROTL64(x, y) rotl64(x, y)
 
-static __always_inline u64 getblock64(const u64 *p, int i)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	return p[i];
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-	return __builtin_bswap64(p[i]);
-#else
-#error "can't figure out byte order"
-#endif
-}
-
-static __always_inline void putblock64(u64 *p, int i, u64 value)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	p[i] = value;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-	p[i] = __builtin_bswap64(value);
-#else
-#error "can't figure out byte order"
-#endif
-}
-
 /* Finalization mix - force all bits of a hash block to avalanche */
@@ -60,6 +41,8 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
 	const u64 c1 = 0x87c37b91114253d5LLU;
 	const u64 c2 = 0x4cf5ad432745937fLLU;
 
+	u64 *hash_out = out;
+
 	/* body */
 
 	const u64 *blocks = (const u64 *)(data);
@@ -67,8 +50,8 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
 	int i;
 
 	for (i = 0; i < nblocks; i++) {
-		u64 k1 = getblock64(blocks, i * 2 + 0);
-		u64 k2 = getblock64(blocks, i * 2 + 1);
+		u64 k1 = get_unaligned_le64(&blocks[i * 2]);
+		u64 k2 = get_unaligned_le64(&blocks[i * 2 + 1]);
 
 		k1 *= c1;
 		k1 = ROTL64(k1, 31);
@@ -170,6 +153,6 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
 	h1 += h2;
 	h2 += h1;
 
-	putblock64((u64 *)out, 0, h1);
-	putblock64((u64 *)out, 1, h2);
+	put_unaligned_le64(h1, &hash_out[0]);
+	put_unaligned_le64(h2, &hash_out[1]);
 }
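For context: get_unaligned_le64() and put_unaligned_le64() are kernel helpers from <asm/unaligned.h> that load and store 64-bit little-endian values at byte granularity, so the compiler-specific __BYTE_ORDER__/__builtin_bswap64() preprocessor dance removed above is unnecessary, and unaligned input/output pointers become safe as well. A minimal userspace sketch of the equivalent semantics (the sketch_* names and the main() harness are illustrative stand-ins, not the kernel implementations):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-ins for the kernel's get_unaligned_le64() and
 * put_unaligned_le64(). Assembling the value byte by byte yields the
 * same result on little- and big-endian hosts and never dereferences
 * a possibly misaligned u64 pointer.
 */
static uint64_t sketch_get_unaligned_le64(const void *p)
{
	const uint8_t *b = p;
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v |= (uint64_t)b[i] << (8 * i);	/* byte 0 is least significant */
	return v;
}

static void sketch_put_unaligned_le64(uint64_t v, void *p)
{
	uint8_t *b = p;
	int i;

	for (i = 0; i < 8; i++)
		b[i] = (uint8_t)(v >> (8 * i));
}

int main(void)
{
	uint8_t buf[9];
	uint64_t v = 0x0123456789abcdefULL;

	/* Deliberately misaligned address: still well-defined here. */
	sketch_put_unaligned_le64(v, buf + 1);
	printf("round trip ok: %d\n",
	       sketch_get_unaligned_le64(buf + 1) == v);
	return 0;
}

Open-coding the calls, per the commit message, also drops the getblock64()/putblock64() wrappers entirely, leaving the per-block little-endian loads and stores visible at their call sites.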