Commit e69eae65 authored by Joakim Tjernlund, committed by Linus Torvalds

zlib: make new optimized inflate endian independent

Commit 6846ee5c ("zlib: Fix build of powerpc boot wrapper") made the new
optimized inflate available only on architectures that define
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.

This patch re-enables the optimization for all architectures by defining
our own endian-independent version of the unaligned access (a standalone
sketch of the helper appears below, after the commit metadata).  As an
added bonus, architectures that define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
use a plain load instead.
Signed-off-by: Joakim Tjernlund <Joakim.Tjernlund@transmode.se>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5ceaa2f3
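
For reference, here is a minimal, self-contained sketch of the idea behind the
patch: a 16-bit value is read from a possibly unaligned address as two byte
loads through a union, so the result matches a native aligned load regardless
of endianness. The union and helper mirror what the diff below adds; the
main() harness and the memcpy-based reference read are illustrative only and
are not part of the kernel change.

#include <stdio.h>
#include <string.h>

union uu {
	unsigned short us;
	unsigned char b[2];
};

/* Endian-independent 16-bit load from a possibly unaligned address:
 * copying the bytes one at a time preserves their memory order, so the
 * value equals what an aligned native load of the same bytes would give. */
static inline unsigned short get_unaligned16(const unsigned short *p)
{
	union uu mm;
	const unsigned char *b = (const unsigned char *)p;

	mm.b[0] = b[0];
	mm.b[1] = b[1];
	return mm.us;
}

int main(void)
{
	unsigned char buf[4] = { 0x00, 0x12, 0x34, 0x00 };
	/* buf + 1 points into the middle of the byte buffer; it need not be
	 * 2-byte aligned, which is exactly the case the helper handles. */
	unsigned short v = get_unaligned16((const unsigned short *)(buf + 1));
	unsigned short ref;

	memcpy(&ref, buf + 1, sizeof(ref));	/* reference read via memcpy */
	printf("get_unaligned16 = 0x%04x, memcpy reference = 0x%04x\n", v, ref);
	return 0;
}
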
@@ -8,21 +8,6 @@
 #include "inflate.h"
 #include "inffast.h"
 
-/* Only do the unaligned "Faster" variant when
- * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set
- *
- * On powerpc, it won't be as we don't include autoconf.h
- * automatically for the boot wrapper, which is intended as
- * we run in an environment where we may not be able to deal
- * with (even rare) alignment faults. In addition, we do not
- * define __KERNEL__ for arch/powerpc/boot unlike x86
- */
-
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#include <asm/unaligned.h>
-#include <asm/byteorder.h>
-#endif
-
 #ifndef ASMINF
 
 /* Allow machine dependent optimization for post-increment or pre-increment.
@@ -36,14 +21,31 @@
    - Pentium III (Anderson)
    - M68060 (Nikl)
  */
+
+union uu {
+	unsigned short us;
+	unsigned char b[2];
+};
+
+/* Endian independed version */
+static inline unsigned short
+get_unaligned16(const unsigned short *p)
+{
+	union uu  mm;
+	unsigned char *b = (unsigned char *)p;
+
+	mm.b[0] = b[0];
+	mm.b[1] = b[1];
+	return mm.us;
+}
+
 #ifdef POSTINC
 #  define OFF 0
 #  define PUP(a) *(a)++
-#  define UP_UNALIGNED(a) get_unaligned((a)++)
+#  define UP_UNALIGNED(a) get_unaligned16((a)++)
 #else
 #  define OFF 1
 #  define PUP(a) *++(a)
-#  define UP_UNALIGNED(a) get_unaligned(++(a))
+#  define UP_UNALIGNED(a) get_unaligned16(++(a))
 #endif
 
 /*
@@ -256,7 +258,6 @@ void inflate_fast(z_streamp strm, unsigned start)
                     }
                 }
                 else {
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                     unsigned short *sout;
                     unsigned long loops;
 
@@ -274,7 +275,11 @@ void inflate_fast(z_streamp strm, unsigned start)
                         sfrom = (unsigned short *)(from - OFF);
                         loops = len >> 1;
                         do
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+                            PUP(sout) = PUP(sfrom);
+#else
                             PUP(sout) = UP_UNALIGNED(sfrom);
+#endif
                         while (--loops);
                         out = (unsigned char *)sout + OFF;
                         from = (unsigned char *)sfrom + OFF;
@@ -282,14 +287,13 @@ void inflate_fast(z_streamp strm, unsigned start)
                         unsigned short pat16;
 
                         pat16 = *(sout-2+2*OFF);
-                        if (dist == 1)
-#if defined(__BIG_ENDIAN)
-                            pat16 = (pat16 & 0xff) | ((pat16 & 0xff) << 8);
-#elif defined(__LITTLE_ENDIAN)
-                            pat16 = (pat16 & 0xff00) | ((pat16 & 0xff00) >> 8);
-#else
-#error __BIG_ENDIAN nor __LITTLE_ENDIAN is defined
-#endif
+                        if (dist == 1) {
+                            union uu mm;
+                            /* copy one char pattern to both bytes */
+                            mm.us = pat16;
+                            mm.b[0] = mm.b[1];
+                            pat16 = mm.us;
+                        }
                         loops = len >> 1;
                         do
                             PUP(sout) = pat16;
@@ -298,20 +302,6 @@ void inflate_fast(z_streamp strm, unsigned start)
                         }
                         if (len & 1)
                             PUP(out) = PUP(from);
-#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-                    from = out - dist;      /* copy direct from output */
-                    do {                    /* minimum length is three */
-                        PUP(out) = PUP(from);
-                        PUP(out) = PUP(from);
-                        PUP(out) = PUP(from);
-                        len -= 3;
-                    } while (len > 2);
-                    if (len) {
-                        PUP(out) = PUP(from);
-                        if (len > 1)
-                            PUP(out) = PUP(from);
-                    }
-#endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
                 }
             }
             else if ((op & 64) == 0) {      /* 2nd level distance code */
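
A note on the dist == 1 hunk above: the old code built the one-byte repeat
pattern with endianness-specific shifts, while the new code copies byte b[1]
over b[0] through the union, replicating the byte at the higher memory offset
on any machine. The standalone sketch below (not kernel code; the helper names
dup_byte_union and dup_byte_shift and the run-time endianness probe are made
up for illustration, where the kernel relied on __BIG_ENDIAN/__LITTLE_ENDIAN
from <asm/byteorder.h>) checks that the two formulations agree on whatever
host it runs on.

#include <stdio.h>

union uu {
	unsigned short us;
	unsigned char b[2];
};

/* New, endian-independent form: replicate the byte that sits at the
 * higher memory offset of the 16-bit word into both bytes. */
static unsigned short dup_byte_union(unsigned short pat16)
{
	union uu mm;

	mm.us = pat16;
	mm.b[0] = mm.b[1];
	return mm.us;
}

/* Old form, selected here by a run-time endianness probe (the kernel
 * selected it at compile time with preprocessor conditionals). */
static unsigned short dup_byte_shift(unsigned short pat16)
{
	union uu probe = { .us = 1 };

	if (probe.b[0] == 1)	/* little-endian host */
		return (pat16 & 0xff00) | ((pat16 & 0xff00) >> 8);
	else			/* big-endian host */
		return (pat16 & 0x00ff) | ((pat16 & 0x00ff) << 8);
}

int main(void)
{
	unsigned short pat16 = 0x1234;

	/* Both calls should print the same value on either endianness. */
	printf("union: 0x%04x  shift: 0x%04x\n",
	       dup_byte_union(pat16), dup_byte_shift(pat16));
	return 0;
}
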