Commit 0682490d authored by Linus Torvalds

Merge branch 'for-3.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml

Pull UML fixes from Richard Weinberger.

* 'for-3.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml:
  um: uml_setup_stubs': warning: unused variable 'pages'
  um: Use asm-generic/switch_to.h
  um: Disintegrate asm/system.h
  um: switch cow_user.h to htobe{32,64}/betoh{32,64}
  um: several x86 hw-dependent crypto modules won't build on uml
  um: fix linker script generation
parents 3a48d1c0 657b12d3
@@ -3,41 +3,6 @@
 #include <asm/types.h>
-#if defined(__KERNEL__)
-# include <asm/byteorder.h>
-# if defined(__BIG_ENDIAN)
-# define ntohll(x) (x)
-# define htonll(x) (x)
-# elif defined(__LITTLE_ENDIAN)
-# define ntohll(x) be64_to_cpu(x)
-# define htonll(x) cpu_to_be64(x)
-# else
-# error "Could not determine byte order"
-# endif
-#else
-/* For the definition of ntohl, htonl and __BYTE_ORDER */
-#include <endian.h>
-#include <netinet/in.h>
-#if defined(__BYTE_ORDER)
-# if __BYTE_ORDER == __BIG_ENDIAN
-# define ntohll(x) (x)
-# define htonll(x) (x)
-# elif __BYTE_ORDER == __LITTLE_ENDIAN
-# define ntohll(x) bswap_64(x)
-# define htonll(x) bswap_64(x)
-# else
-# error "Could not determine byte order: __BYTE_ORDER uncorrectly defined"
-# endif
-#else /* ! defined(__BYTE_ORDER) */
-# error "Could not determine byte order: __BYTE_ORDER not defined"
-#endif
-#endif /* ! defined(__KERNEL__) */
 extern int init_cow_file(int fd, char *cow_file, char *backing_file,
			  int sectorsize, int alignment, int *bitmap_offset_out,
			  unsigned long *bitmap_len_out, int *data_offset_out);
...
@@ -8,11 +8,10 @@
  * that.
  */
 #include <unistd.h>
-#include <byteswap.h>
 #include <errno.h>
 #include <string.h>
 #include <arpa/inet.h>
-#include <asm/types.h>
+#include <endian.h>
 #include "cow.h"
 #include "cow_sys.h"
@@ -214,8 +213,8 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
 			   "header\n");
 		goto out;
 	}
-	header->magic = htonl(COW_MAGIC);
-	header->version = htonl(COW_VERSION);
+	header->magic = htobe32(COW_MAGIC);
+	header->version = htobe32(COW_VERSION);
 	err = -EINVAL;
 	if (strlen(backing_file) > sizeof(header->backing_file) - 1) {
@@ -246,10 +245,10 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
 		goto out_free;
 	}
-	header->mtime = htonl(modtime);
-	header->size = htonll(*size);
-	header->sectorsize = htonl(sectorsize);
-	header->alignment = htonl(alignment);
+	header->mtime = htobe32(modtime);
+	header->size = htobe64(*size);
+	header->sectorsize = htobe32(sectorsize);
+	header->alignment = htobe32(alignment);
 	header->cow_format = COW_BITMAP;
 	err = cow_write_file(fd, header, sizeof(*header));
@@ -301,8 +300,8 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 	magic = header->v1.magic;
 	if (magic == COW_MAGIC)
 		version = header->v1.version;
-	else if (magic == ntohl(COW_MAGIC))
-		version = ntohl(header->v1.version);
+	else if (magic == be32toh(COW_MAGIC))
+		version = be32toh(header->v1.version);
 	/* No error printed because the non-COW case comes through here */
 	else goto out;
@@ -327,9 +326,9 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 			   "header\n");
 		goto out;
 	}
-	*mtime_out = ntohl(header->v2.mtime);
-	*size_out = ntohll(header->v2.size);
-	*sectorsize_out = ntohl(header->v2.sectorsize);
+	*mtime_out = be32toh(header->v2.mtime);
+	*size_out = be64toh(header->v2.size);
+	*sectorsize_out = be32toh(header->v2.sectorsize);
 	*bitmap_offset_out = sizeof(header->v2);
 	*align_out = *sectorsize_out;
 	file = header->v2.backing_file;
@@ -341,10 +340,10 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 			   "header\n");
 		goto out;
 	}
-	*mtime_out = ntohl(header->v3.mtime);
-	*size_out = ntohll(header->v3.size);
-	*sectorsize_out = ntohl(header->v3.sectorsize);
-	*align_out = ntohl(header->v3.alignment);
+	*mtime_out = be32toh(header->v3.mtime);
+	*size_out = be64toh(header->v3.size);
+	*sectorsize_out = be32toh(header->v3.sectorsize);
+	*align_out = be32toh(header->v3.alignment);
 	if (*align_out == 0) {
 		cow_printf("read_cow_header - invalid COW header, "
 			   "align == 0\n");
@@ -366,16 +365,16 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 	 * this was used until Dec2005 - 64bits are needed to represent
 	 * 2038+. I.e. we can safely do this truncating cast.
 	 *
-	 * Additionally, we must use ntohl() instead of ntohll(), since
+	 * Additionally, we must use be32toh() instead of be64toh(), since
 	 * the program used to use the former (tested - I got mtime
 	 * mismatch "0 vs whatever").
 	 *
 	 * Ever heard about bug-to-bug-compatibility ? ;-) */
-	*mtime_out = (time32_t) ntohl(header->v3_b.mtime);
-	*size_out = ntohll(header->v3_b.size);
-	*sectorsize_out = ntohl(header->v3_b.sectorsize);
-	*align_out = ntohl(header->v3_b.alignment);
+	*mtime_out = (time32_t) be32toh(header->v3_b.mtime);
+	*size_out = be64toh(header->v3_b.size);
+	*sectorsize_out = be32toh(header->v3_b.sectorsize);
+	*align_out = be32toh(header->v3_b.alignment);
 	if (*align_out == 0) {
 		cow_printf("read_cow_header - invalid COW header, "
 			   "align == 0\n");
...
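A note on the interface this series moves to: htobe32()/htobe64() and be32toh()/be64toh() come from the userspace <endian.h> header and replace the hand-rolled ntohll()/htonll() macros deleted from the COW header above. A minimal, self-contained sketch of the round-trip (illustrative code only; the magic and size values are stand-ins, not the real COW constants):

/* Illustrative userspace sketch of the <endian.h> conversions used above.
 * The magic/size values are stand-ins, not the real COW constants. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t magic = 0x4d4f4f43u;           /* stand-in for COW_MAGIC */
	uint64_t size  = 0x0123456789abcdefULL; /* stand-in for an image size */

	uint32_t disk_magic = htobe32(magic);   /* host order -> big-endian, as written to disk */
	uint64_t disk_size  = htobe64(size);

	/* be32toh()/be64toh() undo the conversion when the header is read back. */
	printf("magic round-trips: %s\n", be32toh(disk_magic) == magic ? "yes" : "no");
	printf("size round-trips:  %s\n", be64toh(disk_size) == size ? "yes" : "no");
	return 0;
}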
@@ -22,6 +22,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 #include <asm/uaccess.h>
+#include <asm/switch_to.h>
 #include "init.h"
 #include "irq_kern.h"
...
 generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
 generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
-generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h
+generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
+generic-y += switch_to.h
@@ -3,9 +3,10 @@
 # Licensed under the GPL
 #
 CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \
 			-DELF_ARCH=$(LDS_ELF_ARCH) \
-			-DELF_FORMAT=$(LDS_ELF_FORMAT)
+			-DELF_FORMAT=$(LDS_ELF_FORMAT) \
+			$(LDS_EXTRA)
 extra-y := vmlinux.lds
 clean-files :=
...
@@ -88,11 +88,8 @@ static inline void set_current(struct task_struct *task)
 extern void arch_switch_to(struct task_struct *to);
-void *_switch_to(void *prev, void *next, void *last)
+void *__switch_to(struct task_struct *from, struct task_struct *to)
 {
-	struct task_struct *from = prev;
-	struct task_struct *to = next;
 	to->thread.prev_sched = from;
 	set_current(to);
@@ -111,7 +108,6 @@ void *_switch_to(void *prev, void *next, void *last)
 	} while (current->thread.saved_task);
 	return current->thread.prev_sched;
 }
 void interrupt_end(void)
...
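Context for this hunk: once _switch_to() becomes __switch_to(struct task_struct *, struct task_struct *), UML can drop its private switch_to() macro and rely on asm-generic/switch_to.h, whose switch_to(prev, next, last) essentially expands to last = __switch_to(prev, next). A small userspace model of that contract (hypothetical code, not from the patch; model_switch_to() merely stands in for UML's __switch_to()):

/* Userspace model of the switch_to()/__switch_to() contract relied on above;
 * names here are illustrative only. */
#include <stdio.h>

struct task { const char *name; };

static struct task *current_task;

/* Model of __switch_to(): make `to` the running task and return the task
 * that was actually running beforehand. */
static struct task *model_switch_to(struct task *from, struct task *to)
{
	struct task *prev = current_task;

	(void)from;		/* a real implementation saves/restores state here */
	current_task = to;
	return prev;
}

/* The generic wrapper just records that return value in `last`. */
#define switch_to(prev, next, last) \
	do { (last) = model_switch_to((prev), (next)); } while (0)

int main(void)
{
	struct task a = { "A" }, b = { "B" };
	struct task *last = NULL;

	current_task = &a;
	switch_to(&a, &b, last);
	printf("running %s, switched away from %s\n",
	       current_task->name, last->name);
	return 0;
}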
@@ -103,7 +103,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 void uml_setup_stubs(struct mm_struct *mm)
 {
-	struct page **pages;
 	int err, ret;
 	if (!skas_needs_stub)
...
@@ -14,6 +14,9 @@ LINK-y += $(call cc-option,-m32)
 export LDFLAGS
+LDS_EXTRA := -Ui386
+export LDS_EXTRA
 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
 include $(srctree)/arch/x86/Makefile_32.cpu
...
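The -Ui386 passed through LDS_EXTRA above is presumably there because GCC, in its default non-strict-ISO mode on 32-bit x86, predefines the bare identifier i386, so any literal i386 surviving into the preprocessed linker script (for example in an elf32-i386 output format name) would expand to 1 and break the link. A small hypothetical C demo of that predefined macro, not part of the patch:

/* Hypothetical demo of the predefined `i386` macro that -Ui386 suppresses.
 * Build 32-bit: cc -m32 i386-demo.c && ./a.out
 * With -Ui386 (or on a 64-bit build) the #else branch is taken instead. */
#include <stdio.h>

int main(void)
{
#ifdef i386
	/* Without -Ui386, `i386` is a macro expanding to 1, which is exactly
	 * what mangles tokens like elf32-i386 in a preprocessed linker script. */
	printf("i386 is predefined and expands to %d\n", i386);
#else
	printf("i386 is not predefined here\n");
#endif
	return 0;
}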
-#ifndef _ASM_X86_SYSTEM_H_
-#define _ASM_X86_SYSTEM_H_
+#ifndef _ASM_UM_BARRIER_H_
+#define _ASM_UM_BARRIER_H_
 #include <asm/asm.h>
 #include <asm/segment.h>
@@ -10,111 +10,54 @@
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
-/* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
-# define AT_VECTOR_SIZE_ARCH 2
-#else
-# define AT_VECTOR_SIZE_ARCH 1
-#endif
-extern unsigned long arch_align_stack(unsigned long sp);
-void default_idle(void);
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
  * to devices.
  */
 #ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
-#endif
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *	x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+
+#else /* CONFIG_X86_32 */
+
+#define mb() asm volatile("mfence" : : : "memory")
+#define rmb() asm volatile("lfence" : : : "memory")
+#define wmb() asm volatile("sfence" : : : "memory")
+
+#endif /* CONFIG_X86_32 */
 #define read_barrier_depends() do { } while (0)
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb() rmb()
-#else
-# define smp_rmb() barrier()
-#endif
+#define smp_rmb() rmb()
+#else /* CONFIG_X86_PPRO_FENCE */
+#define smp_rmb() barrier()
+#endif /* CONFIG_X86_PPRO_FENCE */
 #ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() wmb()
-#else
-# define smp_wmb() barrier()
-#endif
+#define smp_wmb() wmb()
+#else /* CONFIG_X86_OOSTORE */
+#define smp_wmb() barrier()
+#endif /* CONFIG_X86_OOSTORE */
 #define smp_read_barrier_depends() read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
+#else /* CONFIG_SMP */
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
 #define smp_read_barrier_depends() do { } while (0)
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
+#endif /* CONFIG_SMP */
 /*
  * Stop RDTSC speculation. This is needed when you need to use RDTSC
@@ -129,7 +72,4 @@ static inline void rdtsc_barrier(void)
 	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
 }
-extern void *_switch_to(void *prev, void *next, void *last);
-#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
 #endif
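The resulting header keeps hardware fences (mfence/lfence/sfence or the lock-prefixed fallback) for mb()/rmb()/wmb() and falls back to plain compiler barriers for the smp_* variants when CONFIG_SMP is off. A rough userspace illustration of the difference between those two flavours (the demo structure and names are mine, not from the patch):

/* Illustrative contrast of the two barrier flavours above:
 *   barrier() - compiler-only fence (empty asm with a "memory" clobber)
 *   mb()      - full hardware fence (mfence), as in the 64-bit branch.
 * Assumes x86-64 and a GNU-compatible compiler; not kernel code. */
#include <stdio.h>

#define barrier() asm volatile("" ::: "memory")
#define mb()      asm volatile("mfence" ::: "memory")

static int flag;
static int payload;

static void producer(void)
{
	payload = 42;   /* write the data ...                          */
	mb();           /* ... and order it before publishing the flag */
	flag = 1;
}

static void consumer(void)
{
	while (!flag)
		barrier();  /* forces the compiler to reload `flag` each pass */
	printf("payload = %d\n", payload);
}

int main(void)
{
	/* Single-threaded here, so this only shows the calling pattern; the
	 * ordering guarantees matter when producer/consumer run on different CPUs. */
	producer();
	consumer();
	return 0;
}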
@@ -627,7 +627,7 @@ config CRYPTO_BLOWFISH_COMMON
 config CRYPTO_BLOWFISH_X86_64
 	tristate "Blowfish cipher algorithm (x86_64)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLOWFISH_COMMON
 	help
@@ -657,7 +657,7 @@ config CRYPTO_CAMELLIA
 config CRYPTO_CAMELLIA_X86_64
 	tristate "Camellia cipher algorithm (x86_64)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	depends on CRYPTO
 	select CRYPTO_ALGAPI
 	select CRYPTO_LRW
@@ -893,7 +893,7 @@ config CRYPTO_TWOFISH_X86_64
 config CRYPTO_TWOFISH_X86_64_3WAY
 	tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_TWOFISH_COMMON
 	select CRYPTO_TWOFISH_X86_64
...