Commit df6ab559 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.26

* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.26:
  sh: Add defconfig for RSK7203.
  sh: Update SE7206 defconfig.
  sh: Disable 4KSTACKS on nommu.
  sh: fix miscompilation of ip_fast_csum with gcc >= 4.3
  sh: module.c use kernel unaligned helpers
  sh/kernel/cpu/irq/intc-sh5.c build fix
parents 3b5b60b8 39eb41ef
@@ -81,7 +81,7 @@ config DEBUG_STACK_USAGE
 
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && (MMU || BROKEN)
	help
	  If you say Y here the kernel will use a 4Kb stacksize for the
	  kernel stack attached to each process/thread. This facilitates
(Two diffs collapsed: presumably the RSK7203 and SE7206 defconfig changes listed above.)
@@ -242,6 +242,7 @@ void __init plat_irq_setup(void)
 			reg += 8;
 		}
 	}
+}
 #endif
 
 /*
@@ -30,6 +30,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <asm/unaligned.h>
 
 void *module_alloc(unsigned long size)
 {
@@ -56,34 +57,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
 	return 0;
 }
 
-#ifdef CONFIG_SUPERH32
-#define COPY_UNALIGNED_WORD(sw, tw, align) \
-{ \
-	void *__s = &(sw), *__t = &(tw); \
-	unsigned short *__s2 = __s, *__t2 = __t; \
-	unsigned char *__s1 = __s, *__t1 = __t; \
-	switch ((align)) \
-	{ \
-	case 0: \
-		*(unsigned long *) __t = *(unsigned long *) __s; \
-		break; \
-	case 2: \
-		*__t2++ = *__s2++; \
-		*__t2 = *__s2; \
-		break; \
-	default: \
-		*__t1++ = *__s1++; \
-		*__t1++ = *__s1++; \
-		*__t1++ = *__s1++; \
-		*__t1 = *__s1; \
-		break; \
-	} \
-}
-#else
-/* One thing SHmedia doesn't screw up! */
-#define COPY_UNALIGNED_WORD(sw, tw, align) { (tw) = (sw); }
-#endif
-
 int apply_relocate_add(Elf32_Shdr *sechdrs,
 		       const char *strtab,
 		       unsigned int symindex,
@@ -96,7 +69,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 	Elf32_Addr relocation;
 	uint32_t *location;
 	uint32_t value;
-	int align;
 
 	pr_debug("Applying relocate section %u to %u\n", relsec,
 		 sechdrs[relsec].sh_info);
@@ -109,7 +81,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
 			+ ELF32_R_SYM(rel[i].r_info);
 		relocation = sym->st_value + rel[i].r_addend;
-		align = (int)location & 3;
 
 #ifdef CONFIG_SUPERH64
 		/* For text addresses, bit2 of the st_other field indicates
@@ -122,15 +93,15 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 		switch (ELF32_R_TYPE(rel[i].r_info)) {
 		case R_SH_DIR32:
-			COPY_UNALIGNED_WORD (*location, value, align);
+			value = get_unaligned(location);
 			value += relocation;
-			COPY_UNALIGNED_WORD (value, *location, align);
+			put_unaligned(value, location);
 			break;
 		case R_SH_REL32:
 			relocation = (relocation - (Elf32_Addr) location);
-			COPY_UNALIGNED_WORD (*location, value, align);
+			value = get_unaligned(location);
 			value += relocation;
-			COPY_UNALIGNED_WORD (value, *location, align);
+			put_unaligned(value, location);
 			break;
 		case R_SH_IMM_LOW16:
 			*location = (*location & ~0x3fffc00) |
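The module.c hunks above replace the hand-rolled COPY_UNALIGNED_WORD macro with the kernel's generic get_unaligned()/put_unaligned() helpers from <asm/unaligned.h>, which handle any alignment without an explicit align argument. A minimal userspace sketch of the same pattern; the get_unaligned_u32()/put_unaligned_u32() helpers, the buffer, and the relocation value here are invented stand-ins for illustration, not the kernel's implementation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* memcpy() through a byte pointer lets the compiler emit whatever access
 * sequence is safe for a possibly misaligned address, which is what the
 * old macro's switch on `align` did by hand. */
static uint32_t get_unaligned_u32(const void *p)
{
	uint32_t v;
	memcpy(&v, p, sizeof(v));
	return v;
}

static void put_unaligned_u32(uint32_t v, void *p)
{
	memcpy(p, &v, sizeof(v));
}

int main(void)
{
	unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };
	void *location = buf + 1;       /* deliberately misaligned address */
	uint32_t relocation = 0x1000;   /* invented value for the demo */

	/* Same shape as the R_SH_DIR32 case in apply_relocate_add(). */
	uint32_t value = get_unaligned_u32(location);
	value += relocation;
	put_unaligned_u32(value, location);

	printf("patched word: 0x%08x\n", get_unaligned_u32(location));
	return 0;
}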
@@ -109,7 +109,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
	   will assume they contain their original values. */
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
	: "1" (iph), "2" (ihl)
-	: "t");
+	: "t", "memory");
 
 	return csum_fold(sum);
 }
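For context on the one-line fix above: ip_fast_csum()'s asm reads the IP header through the iph pointer, but without "memory" in the clobber list gcc >= 4.3 may assume the asm touches no memory and reorder or sink stores to the header past it. A standalone sketch of the same bug class, using x86-64 asm rather than SH purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Reads *p inside inline asm. The "memory" clobber tells the compiler the
 * asm accesses memory, so prior stores must be completed before it runs;
 * drop it and the store below may legally be moved past the asm. */
static inline uint32_t asm_load(const uint32_t *p)
{
	uint32_t v;
	asm volatile("movl (%1), %0"
		     : "=r" (v)
		     : "r" (p)
		     : "memory");
	return v;
}

int main(void)
{
	uint32_t word = 5;

	word = 7;   /* must be visible to the asm read; "memory" guarantees it */
	printf("%u\n", asm_load(&word));
	return 0;
}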