Commit 54c0a4b4 authored by Linus Torvalds

Merge branch 'akpm' (incoming from Andrew)

Merge misc updates from Andrew Morton:

 - a few hotfixes

 - dynamic-debug updates

 - ipc updates

 - various other sweepings off the factory floor

* akpm: (31 commits)
  firmware/google: drop 'select EFI' to avoid recursive dependency
  compat: fix sys_fanotify_mark
  checkpatch.pl: check for function declarations without arguments
  mm/migrate.c: fix setting of cpupid on page migration twice against normal page
  softirq: use const char * const for softirq_to_name, whitespace neatening
  softirq: convert printks to pr_<level>
  softirq: use ffs() in __do_softirq()
  kernel/kexec.c: use vscnprintf() instead of vsnprintf() in vmcoreinfo_append_str()
  splice: fix unexpected size truncation
  ipc: fix compat msgrcv with negative msgtyp
  ipc,msg: document barriers
  ipc: delete seq_max field in struct ipc_ids
  ipc: simplify sysvipc_proc_open() return
  ipc: remove useless return statement
  ipc: remove braces for single statements
  ipc: standardize code comments
  ipc: whitespace cleanup
  ipc: change kern_ipc_perm.deleted type to bool
  ipc: introduce ipc_valid_object() helper to sort out IPC_RMID races
  ipc/sem.c: avoid overflow of semop undo (semadj) value
  ...
parents 1b17366d c2218e26
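
Several of the ipc hunks below replace open-coded checks of kern_ipc_perm.deleted with the new ipc_valid_object() helper introduced by this series. The helper itself lives in ipc/util.h and is not part of the hunks shown on this page; a minimal sketch of what it amounts to (an assumption based on the conversions below, not a quote of the merged file) is:

	/*
	 * ipc_valid_object() - helper to sort out IPC_RMID races (sketch).
	 * Returns true if the ipc object is still live, i.e. ipc_rmid() has not
	 * marked it deleted while the ipc lock was being acquired.  Callers are
	 * expected to hold kern_ipc_perm.lock; see the semtimedop() comment in
	 * the diff below for the one documented exception.
	 */
	static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
	{
		return !perm->deleted;
	}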
@@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 	kernel_data.end = virt_to_phys(_end - 1);
 	for_each_memblock(memory, region) {
-		res = memblock_virt_alloc(sizeof(*res), 0);
+		res = memblock_virt_alloc_low(sizeof(*res), 0);
 		res->name = "System RAM";
 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
...
@@ -35,17 +35,11 @@ static struct console early_ocd_console = {
 static int __init setup_early_printk(char *buf)
 {
-	int keep_early;
 	if (!buf || early_console)
 		return 0;
-	if (strstr(buf, "keep"))
-		keep_early = 1;
 	early_console = &early_ocd_console;
-	if (keep_early)
+	if (strstr(buf, "keep"))
 		early_console->flags &= ~CON_BOOT;
 	else
 		early_console->flags |= CON_BOOT;
...
@@ -51,9 +51,9 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
-static inline phys_addr_t get_max_low_mapped(void)
+static inline phys_addr_t get_max_mapped(void)
 {
-	return (phys_addr_t)max_low_pfn_mapped << PAGE_SHIFT;
+	return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
 }
 bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
...
@@ -1119,7 +1119,7 @@ void __init setup_arch(char **cmdline_p)
 	setup_real_mode();
-	memblock_set_current_limit(get_max_low_mapped());
+	memblock_set_current_limit(get_max_mapped());
 	dma_contiguous_reserve(0);
 	/*
...
...@@ -12,8 +12,7 @@ menu "Google Firmware Drivers" ...@@ -12,8 +12,7 @@ menu "Google Firmware Drivers"
config GOOGLE_SMI config GOOGLE_SMI
tristate "SMI interface for Google platforms" tristate "SMI interface for Google platforms"
depends on ACPI && DMI depends on ACPI && DMI && EFI
select EFI
select EFI_VARS select EFI_VARS
help help
Say Y here if you want to enable SMI callbacks for Google Say Y here if you want to enable SMI callbacks for Google
......
@@ -886,9 +886,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
 {
 	return sys_fanotify_mark(fanotify_fd, flags,
 #ifdef __BIG_ENDIAN
-				((__u64)mask1 << 32) | mask0,
-#else
 				((__u64)mask0 << 32) | mask1,
+#else
+				((__u64)mask1 << 32) | mask0,
 #endif
 				dfd, pathname);
 }
...
@@ -948,7 +948,7 @@ static int ocfs2_unlink(struct inode *dir,
 	ocfs2_free_dir_lookup_result(&orphan_insert);
 	ocfs2_free_dir_lookup_result(&lookup);
-	if (status && (status != -ENOTEMPTY))
+	if (status && (status != -ENOTEMPTY) && (status != -ENOENT))
 		mlog_errno(status);
 	return status;
...
@@ -175,6 +175,27 @@ static inline void * __init memblock_virt_alloc_nopanic(
					    NUMA_NO_NODE);
 }
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
+#endif
+
+static inline void * __init memblock_virt_alloc_low(
+					phys_addr_t size, phys_addr_t align)
+{
+	return memblock_virt_alloc_try_nid(size, align,
+					   BOOTMEM_LOW_LIMIT,
+					   ARCH_LOW_ADDRESS_LIMIT,
+					   NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_virt_alloc_low_nopanic(
+					phys_addr_t size, phys_addr_t align)
+{
+	return memblock_virt_alloc_try_nid_nopanic(size, align,
+					   BOOTMEM_LOW_LIMIT,
+					   ARCH_LOW_ADDRESS_LIMIT,
+					   NUMA_NO_NODE);
+}
+
 static inline void * __init memblock_virt_alloc_from_nopanic(
		phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
 {
@@ -238,6 +259,22 @@ static inline void * __init memblock_virt_alloc_nopanic(
 	return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
 }
+static inline void * __init memblock_virt_alloc_low(
+					phys_addr_t size, phys_addr_t align)
+{
+	if (!align)
+		align = SMP_CACHE_BYTES;
+	return __alloc_bootmem_low(size, align, BOOTMEM_LOW_LIMIT);
+}
+
+static inline void * __init memblock_virt_alloc_low_nopanic(
+					phys_addr_t size, phys_addr_t align)
+{
+	if (!align)
+		align = SMP_CACHE_BYTES;
+	return __alloc_bootmem_low_nopanic(size, align, BOOTMEM_LOW_LIMIT);
+}
+
 static inline void * __init memblock_virt_alloc_from_nopanic(
		phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
 {
...
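
The memblock_virt_alloc_low*() helpers added above constrain a boot-time allocation to [BOOTMEM_LOW_LIMIT, ARCH_LOW_ADDRESS_LIMIT), with the upper limit defaulting to the first 4 GiB. A hypothetical caller (illustration only, mirroring the ARM "System RAM" hunk near the top of this diff) might look like:

	#include <linux/bootmem.h>
	#include <linux/ioport.h>

	/*
	 * Allocate a resource descriptor that must stay in low memory.
	 * An align of 0 lets the allocator fall back to a sane default
	 * (SMP_CACHE_BYTES in the bootmem fallback shown above), and the
	 * non-_nopanic variant panics if no suitable memory is available.
	 */
	static struct resource * __init alloc_lowmem_resource(void)
	{
		return memblock_virt_alloc_low(sizeof(struct resource), 0);
	}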
...@@ -360,7 +360,7 @@ enum ...@@ -360,7 +360,7 @@ enum
/* map softirq index to softirq name. update 'softirq_to_name' in /* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq. * kernel/softirq.c when adding a new softirq.
*/ */
extern char *softirq_to_name[NR_SOFTIRQS]; extern const char * const softirq_to_name[NR_SOFTIRQS];
/* softirq mask and active fields moved to irq_cpustat_t in /* softirq mask and active fields moved to irq_cpustat_t in
* asm/hardirq.h to get better cache usage. KAO * asm/hardirq.h to get better cache usage. KAO
......
@@ -11,7 +11,7 @@
 struct kern_ipc_perm
 {
 	spinlock_t	lock;
-	int		deleted;
+	bool		deleted;
 	int		id;
 	key_t		key;
 	kuid_t		uid;
...
...@@ -21,7 +21,6 @@ struct user_namespace; ...@@ -21,7 +21,6 @@ struct user_namespace;
struct ipc_ids { struct ipc_ids {
int in_use; int in_use;
unsigned short seq; unsigned short seq;
unsigned short seq_max;
struct rw_semaphore rwsem; struct rw_semaphore rwsem;
struct idr ipcs_idr; struct idr ipcs_idr;
int next_id; int next_id;
......
@@ -9,7 +9,7 @@ struct msg_msg {
 	struct list_head m_list;
 	long m_type;
 	size_t m_ts;		/* message text size */
-	struct msg_msgseg* next;
+	struct msg_msgseg *next;
 	void *security;
 	/* the actual message follows immediately */
 };
...
@@ -9,7 +9,7 @@
 struct shmid_kernel /* private to the kernel */
 {
 	struct kern_ipc_perm	shm_perm;
-	struct file *		shm_file;
+	struct file		*shm_file;
 	unsigned long		shm_nattch;
 	unsigned long		shm_segsz;
 	time_t			shm_atim;
...
...@@ -24,7 +24,8 @@ ...@@ -24,7 +24,8 @@
* Passed to the actors * Passed to the actors
*/ */
struct splice_desc { struct splice_desc {
unsigned int len, total_len; /* current and remaining length */ size_t total_len; /* remaining length */
unsigned int len; /* current length */
unsigned int flags; /* splice flags */ unsigned int flags; /* splice flags */
/* /*
* actor() private data * actor() private data
......
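
The splice_desc change above widens total_len from unsigned int to size_t because a request larger than 4 GiB was silently truncated when stored in the 32-bit field. A small userspace illustration of that truncation (hypothetical, not kernel code; assumes a 64-bit build where size_t is 64 bits wide):

	#include <stdio.h>
	#include <stddef.h>

	int main(void)
	{
		size_t requested = 1ULL << 32;		/* a 4 GiB splice request */
		unsigned int old_field = requested;	/* old 32-bit field: wraps to 0 */
		size_t new_field = requested;		/* new size_t field: preserved */

		printf("old total_len: %u\n", old_field);	/* prints 0 */
		printf("new total_len: %zu\n", new_field);	/* prints 4294967296 */
		return 0;
	}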
...@@ -92,8 +92,6 @@ static int kernel_init(void *); ...@@ -92,8 +92,6 @@ static int kernel_init(void *);
extern void init_IRQ(void); extern void init_IRQ(void);
extern void fork_init(unsigned long); extern void fork_init(unsigned long);
extern void mca_init(void);
extern void sbus_init(void);
extern void radix_tree_init(void); extern void radix_tree_init(void);
#ifndef CONFIG_DEBUG_RODATA #ifndef CONFIG_DEBUG_RODATA
static inline void mark_rodata_ro(void) { } static inline void mark_rodata_ro(void) { }
......
@@ -197,7 +197,7 @@ static inline int __put_compat_ipc_perm(struct ipc64_perm *p,
 static inline int get_compat_semid64_ds(struct semid64_ds *s64,
					struct compat_semid64_ds __user *up64)
 {
-	if (!access_ok (VERIFY_READ, up64, sizeof(*up64)))
+	if (!access_ok(VERIFY_READ, up64, sizeof(*up64)))
 		return -EFAULT;
 	return __get_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm);
 }
@@ -205,7 +205,7 @@ static inline int get_compat_semid64_ds(struct semid64_ds *s64,
 static inline int get_compat_semid_ds(struct semid64_ds *s,
				      struct compat_semid_ds __user *up)
 {
-	if (!access_ok (VERIFY_READ, up, sizeof(*up)))
+	if (!access_ok(VERIFY_READ, up, sizeof(*up)))
 		return -EFAULT;
 	return __get_compat_ipc_perm(&s->sem_perm, &up->sem_perm);
 }
@@ -215,7 +215,7 @@ static inline int put_compat_semid64_ds(struct semid64_ds *s64,
 {
 	int err;
-	if (!access_ok (VERIFY_WRITE, up64, sizeof(*up64)))
+	if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64)))
 		return -EFAULT;
 	err = __put_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm);
 	err |= __put_user(s64->sem_otime, &up64->sem_otime);
@@ -229,7 +229,7 @@ static inline int put_compat_semid_ds(struct semid64_ds *s,
 {
 	int err;
-	if (!access_ok (VERIFY_WRITE, up, sizeof(*up)))
+	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
 		return -EFAULT;
 	err = __put_compat_ipc_perm(&s->sem_perm, &up->sem_perm);
 	err |= __put_user(s->sem_otime, &up->sem_otime);
@@ -288,11 +288,11 @@ static long do_compat_semctl(int first, int second, int third, u32 pad)
 		break;
 	case IPC_SET:
-		if (version == IPC_64) {
+		if (version == IPC_64)
 			err = get_compat_semid64_ds(&s64, compat_ptr(pad));
-		} else {
+		else
 			err = get_compat_semid_ds(&s64, compat_ptr(pad));
-		}
 		up64 = compat_alloc_user_space(sizeof(s64));
 		if (copy_to_user(up64, &s64, sizeof(s64)))
 			err = -EFAULT;
@@ -376,12 +376,12 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
			struct compat_ipc_kludge ipck;
			if (!uptr)
				return -EINVAL;
-			if (copy_from_user (&ipck, uptr, sizeof(ipck)))
+			if (copy_from_user(&ipck, uptr, sizeof(ipck)))
				return -EFAULT;
			uptr = compat_ptr(ipck.msgp);
			fifth = ipck.msgtyp;
		}
-		return do_msgrcv(first, uptr, second, fifth, third,
+		return do_msgrcv(first, uptr, second, (s32)fifth, third,
				 compat_do_msg_fill);
	}
	case MSGGET:
@@ -515,11 +515,11 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
 		break;
 	case IPC_SET:
-		if (version == IPC_64) {
+		if (version == IPC_64)
			err = get_compat_msqid64(&m64, uptr);
-		} else {
+		else
			err = get_compat_msqid(&m64, uptr);
-		}
		if (err)
			break;
		p = compat_alloc_user_space(sizeof(m64));
@@ -702,11 +702,11 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
	case IPC_SET:
-		if (version == IPC_64) {
+		if (version == IPC_64)
			err = get_compat_shmid64_ds(&s64, uptr);
-		} else {
+		else
			err = get_compat_shmid_ds(&s64, uptr);
-		}
		if (err)
			break;
		p = compat_alloc_user_space(sizeof(s64));
...
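
The (s32)fifth cast in the compat ipc() path above exists because a 32-bit caller's negative msgtyp arrives zero-extended in the wider argument, turning "receive the first message with type <= |msgtyp|" into a search for an absurdly large positive type. A hypothetical userspace illustration of the sign issue (using int in place of the kernel's s32 typedef):

	#include <stdio.h>

	int main(void)
	{
		unsigned long fifth = 0xffffffffUL;	/* msgtyp = -1 as seen from compat space */
		long without_cast = (long)fifth;	/* 4294967295: matches no real type */
		long with_cast = (int)fifth;		/* -1: the intended semantics */

		printf("without cast: %ld, with cast: %ld\n", without_cast, with_cast);
		return 0;
	}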
@@ -64,7 +64,7 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
 	return sys_mq_open(u_name, oflag, mode, p);
 }
-static int compat_prepare_timeout(struct timespec __user * *p,
+static int compat_prepare_timeout(struct timespec __user **p,
				   const struct compat_timespec __user *u)
 {
 	struct timespec ts;
...
...@@ -164,21 +164,21 @@ static struct ctl_table ipc_kern_table[] = { ...@@ -164,21 +164,21 @@ static struct ctl_table ipc_kern_table[] = {
{ {
.procname = "shmmax", .procname = "shmmax",
.data = &init_ipc_ns.shm_ctlmax, .data = &init_ipc_ns.shm_ctlmax,
.maxlen = sizeof (init_ipc_ns.shm_ctlmax), .maxlen = sizeof(init_ipc_ns.shm_ctlmax),
.mode = 0644, .mode = 0644,
.proc_handler = proc_ipc_doulongvec_minmax, .proc_handler = proc_ipc_doulongvec_minmax,
}, },
{ {
.procname = "shmall", .procname = "shmall",
.data = &init_ipc_ns.shm_ctlall, .data = &init_ipc_ns.shm_ctlall,
.maxlen = sizeof (init_ipc_ns.shm_ctlall), .maxlen = sizeof(init_ipc_ns.shm_ctlall),
.mode = 0644, .mode = 0644,
.proc_handler = proc_ipc_doulongvec_minmax, .proc_handler = proc_ipc_doulongvec_minmax,
}, },
{ {
.procname = "shmmni", .procname = "shmmni",
.data = &init_ipc_ns.shm_ctlmni, .data = &init_ipc_ns.shm_ctlmni,
.maxlen = sizeof (init_ipc_ns.shm_ctlmni), .maxlen = sizeof(init_ipc_ns.shm_ctlmni),
.mode = 0644, .mode = 0644,
.proc_handler = proc_ipc_dointvec, .proc_handler = proc_ipc_dointvec,
}, },
...@@ -194,7 +194,7 @@ static struct ctl_table ipc_kern_table[] = { ...@@ -194,7 +194,7 @@ static struct ctl_table ipc_kern_table[] = {
{ {
.procname = "msgmax", .procname = "msgmax",
.data = &init_ipc_ns.msg_ctlmax, .data = &init_ipc_ns.msg_ctlmax,
.maxlen = sizeof (init_ipc_ns.msg_ctlmax), .maxlen = sizeof(init_ipc_ns.msg_ctlmax),
.mode = 0644, .mode = 0644,
.proc_handler = proc_ipc_dointvec_minmax, .proc_handler = proc_ipc_dointvec_minmax,
.extra1 = &zero, .extra1 = &zero,
...@@ -203,7 +203,7 @@ static struct ctl_table ipc_kern_table[] = { ...@@ -203,7 +203,7 @@ static struct ctl_table ipc_kern_table[] = {
{ {
.procname = "msgmni", .procname = "msgmni",
.data = &init_ipc_ns.msg_ctlmni, .data = &init_ipc_ns.msg_ctlmni,
.maxlen = sizeof (init_ipc_ns.msg_ctlmni), .maxlen = sizeof(init_ipc_ns.msg_ctlmni),
.mode = 0644, .mode = 0644,
.proc_handler = proc_ipc_callback_dointvec_minmax, .proc_handler = proc_ipc_callback_dointvec_minmax,
.extra1 = &zero, .extra1 = &zero,
...@@ -212,7 +212,7 @@ static struct ctl_table ipc_kern_table[] = { ...@@ -212,7 +212,7 @@ static struct ctl_table ipc_kern_table[] = {
{ {
.procname = "msgmnb", .procname = "msgmnb",
.data = &init_ipc_ns.msg_ctlmnb, .data = &init_ipc_ns.msg_ctlmnb,
.maxlen = sizeof (init_ipc_ns.msg_ctlmnb), .maxlen = sizeof(init_ipc_ns.msg_ctlmnb),
.mode = 0644, .mode = 0644,
.proc_handler = proc_ipc_dointvec_minmax, .proc_handler = proc_ipc_dointvec_minmax,
.extra1 = &zero, .extra1 = &zero,
...@@ -221,7 +221,7 @@ static struct ctl_table ipc_kern_table[] = { ...@@ -221,7 +221,7 @@ static struct ctl_table ipc_kern_table[] = {
{ {
.procname = "sem", .procname = "sem",
.data = &init_ipc_ns.sem_ctls, .data = &init_ipc_ns.sem_ctls,
.maxlen = 4*sizeof (int), .maxlen = 4*sizeof(int),
.mode = 0644, .mode = 0644,
.proc_handler = proc_ipc_dointvec, .proc_handler = proc_ipc_dointvec,
}, },
......
@@ -6,7 +6,7 @@
  *
  * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com)
  * Lockless receive & send, fd based notify:
  *	Manfred Spraul (manfred@colorfullife.com)
  *
  * Audit: George Wilson (ltcgcw@us.ibm.com)
  *
@@ -73,7 +73,7 @@ struct mqueue_inode_info {
 	struct mq_attr attr;
 	struct sigevent notify;
-	struct pid* notify_owner;
+	struct pid *notify_owner;
 	struct user_namespace *notify_user_ns;
 	struct user_struct *user;	/* user who created, for accounting */
 	struct sock *notify_sock;
@@ -92,7 +92,7 @@ static void remove_notification(struct mqueue_inode_info *info);
 static struct kmem_cache *mqueue_inode_cachep;
-static struct ctl_table_header * mq_sysctl_table;
+static struct ctl_table_header *mq_sysctl_table;
 static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
 {
@@ -466,13 +466,13 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
 static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
 {
 	struct inode *inode = dentry->d_inode;
 	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
 	dir->i_size -= DIRENT_SIZE;
 	drop_nlink(inode);
 	dput(dentry);
 	return 0;
 }
 /*
@@ -622,7 +622,7 @@ static struct ext_wait_queue *wq_get_first_waiter(
 static inline void set_cookie(struct sk_buff *skb, char code)
 {
-	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
+	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
 }
 /*
@@ -1303,11 +1303,11 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
 out_fput:
 	fdput(f);
 out:
-	if (sock) {
+	if (sock)
 		netlink_detachskb(sock, nc);
-	} else if (nc) {
+	else if (nc)
 		dev_kfree_skb(nc);
-	}
 	return ret;
 }
...
...@@ -253,8 +253,14 @@ static void expunge_all(struct msg_queue *msq, int res) ...@@ -253,8 +253,14 @@ static void expunge_all(struct msg_queue *msq, int res)
struct msg_receiver *msr, *t; struct msg_receiver *msr, *t;
list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
msr->r_msg = NULL; msr->r_msg = NULL; /* initialize expunge ordering */
wake_up_process(msr->r_tsk); wake_up_process(msr->r_tsk);
/*
* Ensure that the wakeup is visible before setting r_msg as
* the receiving end depends on it: either spinning on a nil,
* or dealing with -EAGAIN cases. See lockless receive part 1
* and 2 in do_msgrcv().
*/
smp_mb(); smp_mb();
msr->r_msg = ERR_PTR(res); msr->r_msg = ERR_PTR(res);
} }
...@@ -318,7 +324,7 @@ SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) ...@@ -318,7 +324,7 @@ SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
static inline unsigned long static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{ {
switch(version) { switch (version) {
case IPC_64: case IPC_64:
return copy_to_user(buf, in, sizeof(*in)); return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD: case IPC_OLD:
...@@ -363,7 +369,7 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) ...@@ -363,7 +369,7 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
static inline unsigned long static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{ {
switch(version) { switch (version) {
case IPC_64: case IPC_64:
if (copy_from_user(out, buf, sizeof(*out))) if (copy_from_user(out, buf, sizeof(*out)))
return -EFAULT; return -EFAULT;
...@@ -375,9 +381,9 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) ...@@ -375,9 +381,9 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
return -EFAULT; return -EFAULT;
out->msg_perm.uid = tbuf_old.msg_perm.uid; out->msg_perm.uid = tbuf_old.msg_perm.uid;
out->msg_perm.gid = tbuf_old.msg_perm.gid; out->msg_perm.gid = tbuf_old.msg_perm.gid;
out->msg_perm.mode = tbuf_old.msg_perm.mode; out->msg_perm.mode = tbuf_old.msg_perm.mode;
if (tbuf_old.msg_qbytes == 0) if (tbuf_old.msg_qbytes == 0)
out->msg_qbytes = tbuf_old.msg_lqbytes; out->msg_qbytes = tbuf_old.msg_lqbytes;
...@@ -606,13 +612,13 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) ...@@ -606,13 +612,13 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
static int testmsg(struct msg_msg *msg, long type, int mode) static int testmsg(struct msg_msg *msg, long type, int mode)
{ {
switch(mode) switch (mode)
{ {
case SEARCH_ANY: case SEARCH_ANY:
case SEARCH_NUMBER: case SEARCH_NUMBER:
return 1; return 1;
case SEARCH_LESSEQUAL: case SEARCH_LESSEQUAL:
if (msg->m_type <=type) if (msg->m_type <= type)
return 1; return 1;
break; break;
case SEARCH_EQUAL: case SEARCH_EQUAL:
...@@ -638,15 +644,22 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) ...@@ -638,15 +644,22 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
list_del(&msr->r_list); list_del(&msr->r_list);
if (msr->r_maxsize < msg->m_ts) { if (msr->r_maxsize < msg->m_ts) {
/* initialize pipelined send ordering */
msr->r_msg = NULL; msr->r_msg = NULL;
wake_up_process(msr->r_tsk); wake_up_process(msr->r_tsk);
smp_mb(); smp_mb(); /* see barrier comment below */
msr->r_msg = ERR_PTR(-E2BIG); msr->r_msg = ERR_PTR(-E2BIG);
} else { } else {
msr->r_msg = NULL; msr->r_msg = NULL;
msq->q_lrpid = task_pid_vnr(msr->r_tsk); msq->q_lrpid = task_pid_vnr(msr->r_tsk);
msq->q_rtime = get_seconds(); msq->q_rtime = get_seconds();
wake_up_process(msr->r_tsk); wake_up_process(msr->r_tsk);
/*
* Ensure that the wakeup is visible before
* setting r_msg, as the receiving end depends
* on it. See lockless receive part 1 and 2 in
* do_msgrcv().
*/
smp_mb(); smp_mb();
msr->r_msg = msg; msr->r_msg = msg;
...@@ -654,6 +667,7 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) ...@@ -654,6 +667,7 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
} }
} }
} }
return 0; return 0;
} }
...@@ -696,7 +710,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, ...@@ -696,7 +710,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock0; goto out_unlock0;
/* raced with RMID? */ /* raced with RMID? */
if (msq->q_perm.deleted) { if (!ipc_valid_object(&msq->q_perm)) {
err = -EIDRM; err = -EIDRM;
goto out_unlock0; goto out_unlock0;
} }
...@@ -716,6 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, ...@@ -716,6 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock0; goto out_unlock0;
} }
/* enqueue the sender and prepare to block */
ss_add(msq, &s); ss_add(msq, &s);
if (!ipc_rcu_getref(msq)) { if (!ipc_rcu_getref(msq)) {
...@@ -731,7 +746,8 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, ...@@ -731,7 +746,8 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
ipc_lock_object(&msq->q_perm); ipc_lock_object(&msq->q_perm);
ipc_rcu_putref(msq, ipc_rcu_free); ipc_rcu_putref(msq, ipc_rcu_free);
if (msq->q_perm.deleted) { /* raced with RMID? */
if (!ipc_valid_object(&msq->q_perm)) {
err = -EIDRM; err = -EIDRM;
goto out_unlock0; goto out_unlock0;
} }
...@@ -909,7 +925,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl ...@@ -909,7 +925,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
ipc_lock_object(&msq->q_perm); ipc_lock_object(&msq->q_perm);
/* raced with RMID? */ /* raced with RMID? */
if (msq->q_perm.deleted) { if (!ipc_valid_object(&msq->q_perm)) {
msg = ERR_PTR(-EIDRM); msg = ERR_PTR(-EIDRM);
goto out_unlock0; goto out_unlock0;
} }
...@@ -983,7 +999,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl ...@@ -983,7 +999,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
* wake_up_process(). There is a race with exit(), see * wake_up_process(). There is a race with exit(), see
* ipc/mqueue.c for the details. * ipc/mqueue.c for the details.
*/ */
msg = (struct msg_msg*)msr_d.r_msg; msg = (struct msg_msg *)msr_d.r_msg;
while (msg == NULL) { while (msg == NULL) {
cpu_relax(); cpu_relax();
msg = (struct msg_msg *)msr_d.r_msg; msg = (struct msg_msg *)msr_d.r_msg;
...@@ -1004,7 +1020,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl ...@@ -1004,7 +1020,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
/* Lockless receive, part 4: /* Lockless receive, part 4:
* Repeat test after acquiring the spinlock. * Repeat test after acquiring the spinlock.
*/ */
msg = (struct msg_msg*)msr_d.r_msg; msg = (struct msg_msg *)msr_d.r_msg;
if (msg != ERR_PTR(-EAGAIN)) if (msg != ERR_PTR(-EAGAIN))
goto out_unlock0; goto out_unlock0;
......
...@@ -188,7 +188,7 @@ void sem_exit_ns(struct ipc_namespace *ns) ...@@ -188,7 +188,7 @@ void sem_exit_ns(struct ipc_namespace *ns)
} }
#endif #endif
void __init sem_init (void) void __init sem_init(void)
{ {
sem_init_ns(&init_ipc_ns); sem_init_ns(&init_ipc_ns);
ipc_init_proc_interface("sysvipc/sem", ipc_init_proc_interface("sysvipc/sem",
...@@ -225,7 +225,7 @@ static void unmerge_queues(struct sem_array *sma) ...@@ -225,7 +225,7 @@ static void unmerge_queues(struct sem_array *sma)
} }
/** /**
* merge_queues - Merge single semop queues into global queue * merge_queues - merge single semop queues into global queue
* @sma: semaphore array * @sma: semaphore array
* *
* This function merges all per-semaphore queues into the global queue. * This function merges all per-semaphore queues into the global queue.
...@@ -394,7 +394,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, ...@@ -394,7 +394,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
/* ipc_rmid() may have already freed the ID while sem_lock /* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid * was spinning: verify that the structure is still valid
*/ */
if (!ipcp->deleted) if (ipc_valid_object(ipcp))
return container_of(ipcp, struct sem_array, sem_perm); return container_of(ipcp, struct sem_array, sem_perm);
sem_unlock(sma, *locknum); sem_unlock(sma, *locknum);
...@@ -445,11 +445,11 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) ...@@ -445,11 +445,11 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
* * call wake_up_process * * call wake_up_process
* * set queue.status to the final value. * * set queue.status to the final value.
* - the previously blocked thread checks queue.status: * - the previously blocked thread checks queue.status:
* * if it's IN_WAKEUP, then it must wait until the value changes * * if it's IN_WAKEUP, then it must wait until the value changes
* * if it's not -EINTR, then the operation was completed by * * if it's not -EINTR, then the operation was completed by
* update_queue. semtimedop can return queue.status without * update_queue. semtimedop can return queue.status without
* performing any operation on the sem array. * performing any operation on the sem array.
* * otherwise it must acquire the spinlock and check what's up. * * otherwise it must acquire the spinlock and check what's up.
* *
* The two-stage algorithm is necessary to protect against the following * The two-stage algorithm is necessary to protect against the following
* races: * races:
...@@ -474,7 +474,6 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) ...@@ -474,7 +474,6 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
* *
* Called with sem_ids.rwsem held (as a writer) * Called with sem_ids.rwsem held (as a writer)
*/ */
static int newary(struct ipc_namespace *ns, struct ipc_params *params) static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{ {
int id; int id;
...@@ -491,12 +490,12 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) ...@@ -491,12 +490,12 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
if (ns->used_sems + nsems > ns->sc_semmns) if (ns->used_sems + nsems > ns->sc_semmns)
return -ENOSPC; return -ENOSPC;
size = sizeof (*sma) + nsems * sizeof (struct sem); size = sizeof(*sma) + nsems * sizeof(struct sem);
sma = ipc_rcu_alloc(size); sma = ipc_rcu_alloc(size);
if (!sma) { if (!sma)
return -ENOMEM; return -ENOMEM;
}
memset (sma, 0, size); memset(sma, 0, size);
sma->sem_perm.mode = (semflg & S_IRWXUGO); sma->sem_perm.mode = (semflg & S_IRWXUGO);
sma->sem_perm.key = key; sma->sem_perm.key = key;
...@@ -584,10 +583,11 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) ...@@ -584,10 +583,11 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
} }
/** perform_atomic_semop - Perform (if possible) a semaphore operation /**
* perform_atomic_semop - Perform (if possible) a semaphore operation
* @sma: semaphore array * @sma: semaphore array
* @sops: array with operations that should be checked * @sops: array with operations that should be checked
* @nsems: number of sops * @nsops: number of operations
* @un: undo array * @un: undo array
* @pid: pid that did the change * @pid: pid that did the change
* *
...@@ -595,19 +595,18 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) ...@@ -595,19 +595,18 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
* Returns 1 if the operation is impossible, the caller must sleep. * Returns 1 if the operation is impossible, the caller must sleep.
* Negative values are error codes. * Negative values are error codes.
*/ */
static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops, static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
int nsops, struct sem_undo *un, int pid) int nsops, struct sem_undo *un, int pid)
{ {
int result, sem_op; int result, sem_op;
struct sembuf *sop; struct sembuf *sop;
struct sem * curr; struct sem *curr;
for (sop = sops; sop < sops + nsops; sop++) { for (sop = sops; sop < sops + nsops; sop++) {
curr = sma->sem_base + sop->sem_num; curr = sma->sem_base + sop->sem_num;
sem_op = sop->sem_op; sem_op = sop->sem_op;
result = curr->semval; result = curr->semval;
if (!sem_op && result) if (!sem_op && result)
goto would_block; goto would_block;
...@@ -616,25 +615,24 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops, ...@@ -616,25 +615,24 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
goto would_block; goto would_block;
if (result > SEMVMX) if (result > SEMVMX)
goto out_of_range; goto out_of_range;
if (sop->sem_flg & SEM_UNDO) { if (sop->sem_flg & SEM_UNDO) {
int undo = un->semadj[sop->sem_num] - sem_op; int undo = un->semadj[sop->sem_num] - sem_op;
/* /* Exceeding the undo range is an error. */
* Exceeding the undo range is an error.
*/
if (undo < (-SEMAEM - 1) || undo > SEMAEM) if (undo < (-SEMAEM - 1) || undo > SEMAEM)
goto out_of_range; goto out_of_range;
un->semadj[sop->sem_num] = undo;
} }
curr->semval = result; curr->semval = result;
} }
sop--; sop--;
while (sop >= sops) { while (sop >= sops) {
sma->sem_base[sop->sem_num].sempid = pid; sma->sem_base[sop->sem_num].sempid = pid;
if (sop->sem_flg & SEM_UNDO)
un->semadj[sop->sem_num] -= sop->sem_op;
sop--; sop--;
} }
return 0; return 0;
out_of_range: out_of_range:
...@@ -650,7 +648,10 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops, ...@@ -650,7 +648,10 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
undo: undo:
sop--; sop--;
while (sop >= sops) { while (sop >= sops) {
sma->sem_base[sop->sem_num].semval -= sop->sem_op; sem_op = sop->sem_op;
sma->sem_base[sop->sem_num].semval -= sem_op;
if (sop->sem_flg & SEM_UNDO)
un->semadj[sop->sem_num] += sem_op;
sop--; sop--;
} }
...@@ -680,7 +681,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt, ...@@ -680,7 +681,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
} }
/** /**
* wake_up_sem_queue_do(pt) - do the actual wake-up * wake_up_sem_queue_do - do the actual wake-up
* @pt: list of tasks to be woken up * @pt: list of tasks to be woken up
* *
* Do the actual wake-up. * Do the actual wake-up.
...@@ -746,7 +747,7 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q) ...@@ -746,7 +747,7 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q)
} }
/** /**
* wake_const_ops(sma, semnum, pt) - Wake up non-alter tasks * wake_const_ops - wake up non-alter tasks
* @sma: semaphore array. * @sma: semaphore array.
* @semnum: semaphore that was modified. * @semnum: semaphore that was modified.
* @pt: list head for the tasks that must be woken up. * @pt: list head for the tasks that must be woken up.
...@@ -796,15 +797,14 @@ static int wake_const_ops(struct sem_array *sma, int semnum, ...@@ -796,15 +797,14 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
} }
/** /**
* do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks * do_smart_wakeup_zero - wakeup all wait for zero tasks
* @sma: semaphore array * @sma: semaphore array
* @sops: operations that were performed * @sops: operations that were performed
* @nsops: number of operations * @nsops: number of operations
* @pt: list head of the tasks that must be woken up. * @pt: list head of the tasks that must be woken up.
* *
* do_smart_wakeup_zero() checks all required queue for wait-for-zero * Checks all required queue for wait-for-zero operations, based
* operations, based on the actual changes that were performed on the * on the actual changes that were performed on the semaphore array.
* semaphore array.
* The function returns 1 if at least one operation was completed successfully. * The function returns 1 if at least one operation was completed successfully.
*/ */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
...@@ -848,7 +848,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, ...@@ -848,7 +848,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
/** /**
* update_queue(sma, semnum): Look for tasks that can be completed. * update_queue - look for tasks that can be completed.
* @sma: semaphore array. * @sma: semaphore array.
* @semnum: semaphore that was modified. * @semnum: semaphore that was modified.
* @pt: list head for the tasks that must be woken up. * @pt: list head for the tasks that must be woken up.
...@@ -918,7 +918,7 @@ static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt) ...@@ -918,7 +918,7 @@ static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
} }
/** /**
* set_semotime(sma, sops) - set sem_otime * set_semotime - set sem_otime
* @sma: semaphore array * @sma: semaphore array
* @sops: operations that modified the array, may be NULL * @sops: operations that modified the array, may be NULL
* *
...@@ -936,7 +936,7 @@ static void set_semotime(struct sem_array *sma, struct sembuf *sops) ...@@ -936,7 +936,7 @@ static void set_semotime(struct sem_array *sma, struct sembuf *sops)
} }
/** /**
* do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue * do_smart_update - optimized update_queue
* @sma: semaphore array * @sma: semaphore array
* @sops: operations that were performed * @sops: operations that were performed
* @nsops: number of operations * @nsops: number of operations
...@@ -998,21 +998,21 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop ...@@ -998,21 +998,21 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
* The counts we return here are a rough approximation, but still * The counts we return here are a rough approximation, but still
* warrant that semncnt+semzcnt>0 if the task is on the pending queue. * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
*/ */
static int count_semncnt (struct sem_array * sma, ushort semnum) static int count_semncnt(struct sem_array *sma, ushort semnum)
{ {
int semncnt; int semncnt;
struct sem_queue * q; struct sem_queue *q;
semncnt = 0; semncnt = 0;
list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) { list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
struct sembuf * sops = q->sops; struct sembuf *sops = q->sops;
BUG_ON(sops->sem_num != semnum); BUG_ON(sops->sem_num != semnum);
if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT)) if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
semncnt++; semncnt++;
} }
list_for_each_entry(q, &sma->pending_alter, list) { list_for_each_entry(q, &sma->pending_alter, list) {
struct sembuf * sops = q->sops; struct sembuf *sops = q->sops;
int nsops = q->nsops; int nsops = q->nsops;
int i; int i;
for (i = 0; i < nsops; i++) for (i = 0; i < nsops; i++)
...@@ -1024,21 +1024,21 @@ static int count_semncnt (struct sem_array * sma, ushort semnum) ...@@ -1024,21 +1024,21 @@ static int count_semncnt (struct sem_array * sma, ushort semnum)
return semncnt; return semncnt;
} }
static int count_semzcnt (struct sem_array * sma, ushort semnum) static int count_semzcnt(struct sem_array *sma, ushort semnum)
{ {
int semzcnt; int semzcnt;
struct sem_queue * q; struct sem_queue *q;
semzcnt = 0; semzcnt = 0;
list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) { list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
struct sembuf * sops = q->sops; struct sembuf *sops = q->sops;
BUG_ON(sops->sem_num != semnum); BUG_ON(sops->sem_num != semnum);
if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT)) if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
semzcnt++; semzcnt++;
} }
list_for_each_entry(q, &sma->pending_const, list) { list_for_each_entry(q, &sma->pending_const, list) {
struct sembuf * sops = q->sops; struct sembuf *sops = q->sops;
int nsops = q->nsops; int nsops = q->nsops;
int i; int i;
for (i = 0; i < nsops; i++) for (i = 0; i < nsops; i++)
...@@ -1108,7 +1108,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) ...@@ -1108,7 +1108,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{ {
switch(version) { switch (version) {
case IPC_64: case IPC_64:
return copy_to_user(buf, in, sizeof(*in)); return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD: case IPC_OLD:
...@@ -1151,7 +1151,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, ...@@ -1151,7 +1151,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
int err; int err;
struct sem_array *sma; struct sem_array *sma;
switch(cmd) { switch (cmd) {
case IPC_INFO: case IPC_INFO:
case SEM_INFO: case SEM_INFO:
{ {
...@@ -1162,7 +1162,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, ...@@ -1162,7 +1162,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
if (err) if (err)
return err; return err;
memset(&seminfo,0,sizeof(seminfo)); memset(&seminfo, 0, sizeof(seminfo));
seminfo.semmni = ns->sc_semmni; seminfo.semmni = ns->sc_semmni;
seminfo.semmns = ns->sc_semmns; seminfo.semmns = ns->sc_semmns;
seminfo.semmsl = ns->sc_semmsl; seminfo.semmsl = ns->sc_semmsl;
...@@ -1183,7 +1183,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, ...@@ -1183,7 +1183,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
up_read(&sem_ids(ns).rwsem); up_read(&sem_ids(ns).rwsem);
if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
return -EFAULT; return -EFAULT;
return (max_id < 0) ? 0: max_id; return (max_id < 0) ? 0 : max_id;
} }
case IPC_STAT: case IPC_STAT:
case SEM_STAT: case SEM_STAT:
...@@ -1239,7 +1239,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1239,7 +1239,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
{ {
struct sem_undo *un; struct sem_undo *un;
struct sem_array *sma; struct sem_array *sma;
struct sem* curr; struct sem *curr;
int err; int err;
struct list_head tasks; struct list_head tasks;
int val; int val;
...@@ -1282,7 +1282,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1282,7 +1282,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sem_lock(sma, NULL, -1); sem_lock(sma, NULL, -1);
if (sma->sem_perm.deleted) { if (!ipc_valid_object(&sma->sem_perm)) {
sem_unlock(sma, -1); sem_unlock(sma, -1);
rcu_read_unlock(); rcu_read_unlock();
return -EIDRM; return -EIDRM;
...@@ -1309,10 +1309,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1309,10 +1309,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int cmd, void __user *p) int cmd, void __user *p)
{ {
struct sem_array *sma; struct sem_array *sma;
struct sem* curr; struct sem *curr;
int err, nsems; int err, nsems;
ushort fast_sem_io[SEMMSL_FAST]; ushort fast_sem_io[SEMMSL_FAST];
ushort* sem_io = fast_sem_io; ushort *sem_io = fast_sem_io;
struct list_head tasks; struct list_head tasks;
INIT_LIST_HEAD(&tasks); INIT_LIST_HEAD(&tasks);
...@@ -1342,11 +1342,11 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1342,11 +1342,11 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i; int i;
sem_lock(sma, NULL, -1); sem_lock(sma, NULL, -1);
if (sma->sem_perm.deleted) { if (!ipc_valid_object(&sma->sem_perm)) {
err = -EIDRM; err = -EIDRM;
goto out_unlock; goto out_unlock;
} }
if(nsems > SEMMSL_FAST) { if (nsems > SEMMSL_FAST) {
if (!ipc_rcu_getref(sma)) { if (!ipc_rcu_getref(sma)) {
err = -EIDRM; err = -EIDRM;
goto out_unlock; goto out_unlock;
...@@ -1354,14 +1354,14 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1354,14 +1354,14 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_unlock(sma, -1); sem_unlock(sma, -1);
rcu_read_unlock(); rcu_read_unlock();
sem_io = ipc_alloc(sizeof(ushort)*nsems); sem_io = ipc_alloc(sizeof(ushort)*nsems);
if(sem_io == NULL) { if (sem_io == NULL) {
ipc_rcu_putref(sma, ipc_rcu_free); ipc_rcu_putref(sma, ipc_rcu_free);
return -ENOMEM; return -ENOMEM;
} }
rcu_read_lock(); rcu_read_lock();
sem_lock_and_putref(sma); sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) { if (!ipc_valid_object(&sma->sem_perm)) {
err = -EIDRM; err = -EIDRM;
goto out_unlock; goto out_unlock;
} }
...@@ -1371,7 +1371,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1371,7 +1371,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_unlock(sma, -1); sem_unlock(sma, -1);
rcu_read_unlock(); rcu_read_unlock();
err = 0; err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort))) if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT; err = -EFAULT;
goto out_free; goto out_free;
} }
...@@ -1386,15 +1386,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1386,15 +1386,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
} }
rcu_read_unlock(); rcu_read_unlock();
if(nsems > SEMMSL_FAST) { if (nsems > SEMMSL_FAST) {
sem_io = ipc_alloc(sizeof(ushort)*nsems); sem_io = ipc_alloc(sizeof(ushort)*nsems);
if(sem_io == NULL) { if (sem_io == NULL) {
ipc_rcu_putref(sma, ipc_rcu_free); ipc_rcu_putref(sma, ipc_rcu_free);
return -ENOMEM; return -ENOMEM;
} }
} }
if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) { if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
ipc_rcu_putref(sma, ipc_rcu_free); ipc_rcu_putref(sma, ipc_rcu_free);
err = -EFAULT; err = -EFAULT;
goto out_free; goto out_free;
...@@ -1409,7 +1409,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1409,7 +1409,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
} }
rcu_read_lock(); rcu_read_lock();
sem_lock_and_putref(sma); sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) { if (!ipc_valid_object(&sma->sem_perm)) {
err = -EIDRM; err = -EIDRM;
goto out_unlock; goto out_unlock;
} }
...@@ -1435,7 +1435,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1435,7 +1435,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_rcu_wakeup; goto out_rcu_wakeup;
sem_lock(sma, NULL, -1); sem_lock(sma, NULL, -1);
if (sma->sem_perm.deleted) { if (!ipc_valid_object(&sma->sem_perm)) {
err = -EIDRM; err = -EIDRM;
goto out_unlock; goto out_unlock;
} }
...@@ -1449,10 +1449,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1449,10 +1449,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
err = curr->sempid; err = curr->sempid;
goto out_unlock; goto out_unlock;
case GETNCNT: case GETNCNT:
err = count_semncnt(sma,semnum); err = count_semncnt(sma, semnum);
goto out_unlock; goto out_unlock;
case GETZCNT: case GETZCNT:
err = count_semzcnt(sma,semnum); err = count_semzcnt(sma, semnum);
goto out_unlock; goto out_unlock;
} }
...@@ -1462,7 +1462,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1462,7 +1462,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
rcu_read_unlock(); rcu_read_unlock();
wake_up_sem_queue_do(&tasks); wake_up_sem_queue_do(&tasks);
out_free: out_free:
if(sem_io != fast_sem_io) if (sem_io != fast_sem_io)
ipc_free(sem_io, sizeof(ushort)*nsems); ipc_free(sem_io, sizeof(ushort)*nsems);
return err; return err;
} }
...@@ -1470,7 +1470,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, ...@@ -1470,7 +1470,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
static inline unsigned long static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{ {
switch(version) { switch (version) {
case IPC_64: case IPC_64:
if (copy_from_user(out, buf, sizeof(*out))) if (copy_from_user(out, buf, sizeof(*out)))
return -EFAULT; return -EFAULT;
...@@ -1479,7 +1479,7 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) ...@@ -1479,7 +1479,7 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{ {
struct semid_ds tbuf_old; struct semid_ds tbuf_old;
if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
return -EFAULT; return -EFAULT;
out->sem_perm.uid = tbuf_old.sem_perm.uid; out->sem_perm.uid = tbuf_old.sem_perm.uid;
...@@ -1506,7 +1506,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid, ...@@ -1506,7 +1506,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
struct semid64_ds semid64; struct semid64_ds semid64;
struct kern_ipc_perm *ipcp; struct kern_ipc_perm *ipcp;
if(cmd == IPC_SET) { if (cmd == IPC_SET) {
if (copy_semid_from_user(&semid64, p, version)) if (copy_semid_from_user(&semid64, p, version))
return -EFAULT; return -EFAULT;
} }
...@@ -1566,7 +1566,7 @@ SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg) ...@@ -1566,7 +1566,7 @@ SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
version = ipc_parse_version(&cmd); version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns; ns = current->nsproxy->ipc_ns;
switch(cmd) { switch (cmd) {
case IPC_INFO: case IPC_INFO:
case SEM_INFO: case SEM_INFO:
case IPC_STAT: case IPC_STAT:
...@@ -1634,7 +1634,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) ...@@ -1634,7 +1634,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{ {
struct sem_undo *un; struct sem_undo *un;
assert_spin_locked(&ulp->lock); assert_spin_locked(&ulp->lock);
un = __lookup_undo(ulp, semid); un = __lookup_undo(ulp, semid);
if (un) { if (un) {
...@@ -1645,7 +1645,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) ...@@ -1645,7 +1645,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
} }
/** /**
* find_alloc_undo - Lookup (and if not present create) undo array * find_alloc_undo - lookup (and if not present create) undo array
* @ns: namespace * @ns: namespace
* @semid: semaphore array id * @semid: semaphore array id
* *
...@@ -1670,7 +1670,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) ...@@ -1670,7 +1670,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
spin_lock(&ulp->lock); spin_lock(&ulp->lock);
un = lookup_undo(ulp, semid); un = lookup_undo(ulp, semid);
spin_unlock(&ulp->lock); spin_unlock(&ulp->lock);
if (likely(un!=NULL)) if (likely(un != NULL))
goto out; goto out;
/* no undo structure around - allocate one. */ /* no undo structure around - allocate one. */
...@@ -1699,7 +1699,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) ...@@ -1699,7 +1699,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */ /* step 3: Acquire the lock on semaphore array */
rcu_read_lock(); rcu_read_lock();
sem_lock_and_putref(sma); sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) { if (!ipc_valid_object(&sma->sem_perm)) {
sem_unlock(sma, -1); sem_unlock(sma, -1);
rcu_read_unlock(); rcu_read_unlock();
kfree(new); kfree(new);
...@@ -1735,7 +1735,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) ...@@ -1735,7 +1735,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/** /**
* get_queue_result - Retrieve the result code from sem_queue * get_queue_result - retrieve the result code from sem_queue
* @q: Pointer to queue structure * @q: Pointer to queue structure
* *
* Retrieve the return code from the pending queue. If IN_WAKEUP is found in * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
...@@ -1765,7 +1765,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, ...@@ -1765,7 +1765,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
int error = -EINVAL; int error = -EINVAL;
struct sem_array *sma; struct sem_array *sma;
struct sembuf fast_sops[SEMOPM_FAST]; struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop; struct sembuf *sops = fast_sops, *sop;
struct sem_undo *un; struct sem_undo *un;
int undos = 0, alter = 0, max, locknum; int undos = 0, alter = 0, max, locknum;
struct sem_queue queue; struct sem_queue queue;
...@@ -1779,13 +1779,13 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, ...@@ -1779,13 +1779,13 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
return -EINVAL; return -EINVAL;
if (nsops > ns->sc_semopm) if (nsops > ns->sc_semopm)
return -E2BIG; return -E2BIG;
if(nsops > SEMOPM_FAST) { if (nsops > SEMOPM_FAST) {
sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL); sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
if(sops==NULL) if (sops == NULL)
return -ENOMEM; return -ENOMEM;
} }
if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) { if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
error=-EFAULT; error = -EFAULT;
goto out_free; goto out_free;
} }
if (timeout) { if (timeout) {
...@@ -1846,7 +1846,15 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, ...@@ -1846,7 +1846,15 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
error = -EIDRM; error = -EIDRM;
locknum = sem_lock(sma, sops, nsops); locknum = sem_lock(sma, sops, nsops);
if (sma->sem_perm.deleted) /*
* We eventually might perform the following check in a lockless
* fashion, considering ipc_valid_object() locking constraints.
* If nsops == 1 and there is no contention for sem_perm.lock, then
* only a per-semaphore lock is held and it's OK to proceed with the
* check below. More details on the fine grained locking scheme
* entangled here and why it's RMID race safe on comments at sem_lock()
*/
if (!ipc_valid_object(&sma->sem_perm))
goto out_unlock_free; goto out_unlock_free;
/* /*
* semid identifiers are not unique - find_alloc_undo may have * semid identifiers are not unique - find_alloc_undo may have
...@@ -1959,10 +1967,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, ...@@ -1959,10 +1967,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* If queue.status != -EINTR we are woken up by another process. * If queue.status != -EINTR we are woken up by another process.
* Leave without unlink_queue(), but with sem_unlock(). * Leave without unlink_queue(), but with sem_unlock().
*/ */
if (error != -EINTR)
if (error != -EINTR) {
goto out_unlock_free; goto out_unlock_free;
}
/* /*
* If an interrupt occurred we have to clean up the queue * If an interrupt occurred we have to clean up the queue
...@@ -1984,7 +1990,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, ...@@ -1984,7 +1990,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
rcu_read_unlock(); rcu_read_unlock();
wake_up_sem_queue_do(&tasks); wake_up_sem_queue_do(&tasks);
out_free: out_free:
if(sops != fast_sops) if (sops != fast_sops)
kfree(sops); kfree(sops);
return error; return error;
} }
...@@ -2068,7 +2074,7 @@ void exit_sem(struct task_struct *tsk) ...@@ -2068,7 +2074,7 @@ void exit_sem(struct task_struct *tsk)
sem_lock(sma, NULL, -1); sem_lock(sma, NULL, -1);
/* exit_sem raced with IPC_RMID, nothing to do */ /* exit_sem raced with IPC_RMID, nothing to do */
if (sma->sem_perm.deleted) { if (!ipc_valid_object(&sma->sem_perm)) {
sem_unlock(sma, -1); sem_unlock(sma, -1);
rcu_read_unlock(); rcu_read_unlock();
continue; continue;
...@@ -2093,7 +2099,7 @@ void exit_sem(struct task_struct *tsk) ...@@ -2093,7 +2099,7 @@ void exit_sem(struct task_struct *tsk)
/* perform adjustments registered in un */ /* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) { for (i = 0; i < sma->sem_nsems; i++) {
struct sem * semaphore = &sma->sem_base[i]; struct sem *semaphore = &sma->sem_base[i];
if (un->semadj[i]) { if (un->semadj[i]) {
semaphore->semval += un->semadj[i]; semaphore->semval += un->semadj[i];
/* /*
...@@ -2107,7 +2113,7 @@ void exit_sem(struct task_struct *tsk) ...@@ -2107,7 +2113,7 @@ void exit_sem(struct task_struct *tsk)
* Linux caps the semaphore value, both at 0 * Linux caps the semaphore value, both at 0
* and at SEMVMX. * and at SEMVMX.
* *
* Manfred <manfred@colorfullife.com> * Manfred <manfred@colorfullife.com>
*/ */
if (semaphore->semval < 0) if (semaphore->semval < 0)
semaphore->semval = 0; semaphore->semval = 0;
......
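Editor's note: the semtimedop() hunks above keep the existing small-request fast path: sops starts out pointing at the fixed-size on-stack array fast_sops and only falls back to kmalloc() when nsops exceeds SEMOPM_FAST, with the matching "free only if it was heap allocated" check at out_free. A minimal userspace sketch of that pattern follows; FAST_N, struct op and do_ops() are made-up names, and malloc()/memcpy() stand in for kmalloc()/copy_from_user().

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define FAST_N 8    /* analogous to SEMOPM_FAST: small requests stay on the stack */

struct op { int val; };

static int do_ops(const struct op *src, size_t n)
{
	struct op fast[FAST_N];
	struct op *ops = fast;
	int sum = 0;

	if (n > FAST_N) {
		ops = malloc(n * sizeof(*ops));     /* kmalloc() in the kernel version */
		if (!ops)
			return -1;
	}
	memcpy(ops, src, n * sizeof(*ops));         /* copy_from_user() in the kernel */

	for (size_t i = 0; i < n; i++)
		sum += ops[i].val;                  /* stand-in for the real semaphore work */

	if (ops != fast)                            /* free only what was heap allocated */
		free(ops);
	return sum;
}

int main(void)
{
	struct op small[3] = { {1}, {2}, {3} };
	printf("%d\n", do_ops(small, 3));
	return 0;
}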
...@@ -67,7 +67,7 @@ static const struct vm_operations_struct shm_vm_ops; ...@@ -67,7 +67,7 @@ static const struct vm_operations_struct shm_vm_ops;
static int newseg(struct ipc_namespace *, struct ipc_params *); static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma); static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma); static void shm_close(struct vm_area_struct *vma);
static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it); static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif #endif
...@@ -91,7 +91,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) ...@@ -91,7 +91,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
struct shmid_kernel *shp; struct shmid_kernel *shp;
shp = container_of(ipcp, struct shmid_kernel, shm_perm); shp = container_of(ipcp, struct shmid_kernel, shm_perm);
if (shp->shm_nattch){ if (shp->shm_nattch) {
shp->shm_perm.mode |= SHM_DEST; shp->shm_perm.mode |= SHM_DEST;
/* Do not find it any more */ /* Do not find it any more */
shp->shm_perm.key = IPC_PRIVATE; shp->shm_perm.key = IPC_PRIVATE;
...@@ -116,7 +116,7 @@ static int __init ipc_ns_init(void) ...@@ -116,7 +116,7 @@ static int __init ipc_ns_init(void)
pure_initcall(ipc_ns_init); pure_initcall(ipc_ns_init);
void __init shm_init (void) void __init shm_init(void)
{ {
ipc_init_proc_interface("sysvipc/shm", ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32 #if BITS_PER_LONG <= 32
...@@ -248,7 +248,7 @@ static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) ...@@ -248,7 +248,7 @@ static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
*/ */
static void shm_close(struct vm_area_struct *vma) static void shm_close(struct vm_area_struct *vma)
{ {
struct file * file = vma->vm_file; struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file); struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp; struct shmid_kernel *shp;
struct ipc_namespace *ns = sfd->ns; struct ipc_namespace *ns = sfd->ns;
...@@ -379,7 +379,7 @@ static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, ...@@ -379,7 +379,7 @@ static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
} }
#endif #endif
static int shm_mmap(struct file * file, struct vm_area_struct * vma) static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{ {
struct shm_file_data *sfd = shm_file_data(file); struct shm_file_data *sfd = shm_file_data(file);
int ret; int ret;
...@@ -477,7 +477,6 @@ static const struct vm_operations_struct shm_vm_ops = { ...@@ -477,7 +477,6 @@ static const struct vm_operations_struct shm_vm_ops = {
* *
* Called with shm_ids.rwsem held as a writer. * Called with shm_ids.rwsem held as a writer.
*/ */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params) static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{ {
key_t key = params->key; key_t key = params->key;
...@@ -486,7 +485,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) ...@@ -486,7 +485,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
int error; int error;
struct shmid_kernel *shp; struct shmid_kernel *shp;
size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct file * file; struct file *file;
char name[13]; char name[13];
int id; int id;
vm_flags_t acctflag = 0; vm_flags_t acctflag = 0;
...@@ -512,7 +511,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) ...@@ -512,7 +511,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
return error; return error;
} }
sprintf (name, "SYSV%08x", key); sprintf(name, "SYSV%08x", key);
if (shmflg & SHM_HUGETLB) { if (shmflg & SHM_HUGETLB) {
struct hstate *hs; struct hstate *hs;
size_t hugesize; size_t hugesize;
...@@ -533,7 +532,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) ...@@ -533,7 +532,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
} else { } else {
/* /*
* Do not allow no accounting for OVERCOMMIT_NEVER, even * Do not allow no accounting for OVERCOMMIT_NEVER, even
* if it's asked for. * if it's asked for.
*/ */
if ((shmflg & SHM_NORESERVE) && if ((shmflg & SHM_NORESERVE) &&
sysctl_overcommit_memory != OVERCOMMIT_NEVER) sysctl_overcommit_memory != OVERCOMMIT_NEVER)
...@@ -628,7 +627,7 @@ SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) ...@@ -628,7 +627,7 @@ SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{ {
switch(version) { switch (version) {
case IPC_64: case IPC_64:
return copy_to_user(buf, in, sizeof(*in)); return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD: case IPC_OLD:
...@@ -655,7 +654,7 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ ...@@ -655,7 +654,7 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_
static inline unsigned long static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{ {
switch(version) { switch (version) {
case IPC_64: case IPC_64:
if (copy_from_user(out, buf, sizeof(*out))) if (copy_from_user(out, buf, sizeof(*out)))
return -EFAULT; return -EFAULT;
...@@ -680,14 +679,14 @@ copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) ...@@ -680,14 +679,14 @@ copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{ {
switch(version) { switch (version) {
case IPC_64: case IPC_64:
return copy_to_user(buf, in, sizeof(*in)); return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD: case IPC_OLD:
{ {
struct shminfo out; struct shminfo out;
if(in->shmmax > INT_MAX) if (in->shmmax > INT_MAX)
out.shmmax = INT_MAX; out.shmmax = INT_MAX;
else else
out.shmmax = (int)in->shmmax; out.shmmax = (int)in->shmmax;
...@@ -846,14 +845,14 @@ static int shmctl_nolock(struct ipc_namespace *ns, int shmid, ...@@ -846,14 +845,14 @@ static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
shminfo.shmall = ns->shm_ctlall; shminfo.shmall = ns->shm_ctlall;
shminfo.shmmin = SHMMIN; shminfo.shmmin = SHMMIN;
if(copy_shminfo_to_user (buf, &shminfo, version)) if (copy_shminfo_to_user(buf, &shminfo, version))
return -EFAULT; return -EFAULT;
down_read(&shm_ids(ns).rwsem); down_read(&shm_ids(ns).rwsem);
err = ipc_get_maxid(&shm_ids(ns)); err = ipc_get_maxid(&shm_ids(ns));
up_read(&shm_ids(ns).rwsem); up_read(&shm_ids(ns).rwsem);
if(err<0) if (err < 0)
err = 0; err = 0;
goto out; goto out;
} }
...@@ -864,7 +863,7 @@ static int shmctl_nolock(struct ipc_namespace *ns, int shmid, ...@@ -864,7 +863,7 @@ static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
memset(&shm_info, 0, sizeof(shm_info)); memset(&shm_info, 0, sizeof(shm_info));
down_read(&shm_ids(ns).rwsem); down_read(&shm_ids(ns).rwsem);
shm_info.used_ids = shm_ids(ns).in_use; shm_info.used_ids = shm_ids(ns).in_use;
shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp); shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
shm_info.shm_tot = ns->shm_tot; shm_info.shm_tot = ns->shm_tot;
shm_info.swap_attempts = 0; shm_info.swap_attempts = 0;
shm_info.swap_successes = 0; shm_info.swap_successes = 0;
...@@ -975,6 +974,13 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) ...@@ -975,6 +974,13 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
goto out_unlock1; goto out_unlock1;
ipc_lock_object(&shp->shm_perm); ipc_lock_object(&shp->shm_perm);
/* check if shm_destroy() is tearing down shp */
if (!ipc_valid_object(&shp->shm_perm)) {
err = -EIDRM;
goto out_unlock0;
}
if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
kuid_t euid = current_euid(); kuid_t euid = current_euid();
if (!uid_eq(euid, shp->shm_perm.uid) && if (!uid_eq(euid, shp->shm_perm.uid) &&
...@@ -989,13 +995,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) ...@@ -989,13 +995,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
} }
shm_file = shp->shm_file; shm_file = shp->shm_file;
/* check if shm_destroy() is tearing down shp */
if (shm_file == NULL) {
err = -EIDRM;
goto out_unlock0;
}
if (is_file_hugepages(shm_file)) if (is_file_hugepages(shm_file))
goto out_unlock0; goto out_unlock0;
...@@ -1047,7 +1046,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, ...@@ -1047,7 +1046,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
struct shmid_kernel *shp; struct shmid_kernel *shp;
unsigned long addr; unsigned long addr;
unsigned long size; unsigned long size;
struct file * file; struct file *file;
int err; int err;
unsigned long flags; unsigned long flags;
unsigned long prot; unsigned long prot;
...@@ -1116,7 +1115,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, ...@@ -1116,7 +1115,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
ipc_lock_object(&shp->shm_perm); ipc_lock_object(&shp->shm_perm);
/* check if shm_destroy() is tearing down shp */ /* check if shm_destroy() is tearing down shp */
if (shp->shm_file == NULL) { if (!ipc_valid_object(&shp->shm_perm)) {
ipc_unlock_object(&shp->shm_perm); ipc_unlock_object(&shp->shm_perm);
err = -EIDRM; err = -EIDRM;
goto out_unlock; goto out_unlock;
......
...@@ -110,15 +110,15 @@ static struct notifier_block ipc_memory_nb = { ...@@ -110,15 +110,15 @@ static struct notifier_block ipc_memory_nb = {
}; };
/** /**
* ipc_init - initialise IPC subsystem * ipc_init - initialise ipc subsystem
* *
* The various system5 IPC resources (semaphores, messages and shared * The various sysv ipc resources (semaphores, messages and shared
* memory) are initialised * memory) are initialised.
* A callback routine is registered into the memory hotplug notifier *
* chain: since msgmni scales to lowmem this callback routine will be * A callback routine is registered into the memory hotplug notifier
* called upon successful memory add / remove to recompute msmgni. * chain: since msgmni scales to lowmem this callback routine will be
* called upon successful memory add / remove to recompute msmgni.
*/ */
static int __init ipc_init(void) static int __init ipc_init(void)
{ {
sem_init(); sem_init();
...@@ -131,39 +131,29 @@ static int __init ipc_init(void) ...@@ -131,39 +131,29 @@ static int __init ipc_init(void)
__initcall(ipc_init); __initcall(ipc_init);
/** /**
* ipc_init_ids - initialise IPC identifiers * ipc_init_ids - initialise ipc identifiers
* @ids: Identifier set * @ids: ipc identifier set
* *
* Set up the sequence range to use for the ipc identifier range (limited * Set up the sequence range to use for the ipc identifier range (limited
* below IPCMNI) then initialise the ids idr. * below IPCMNI) then initialise the ids idr.
*/ */
void ipc_init_ids(struct ipc_ids *ids) void ipc_init_ids(struct ipc_ids *ids)
{ {
init_rwsem(&ids->rwsem);
ids->in_use = 0; ids->in_use = 0;
ids->seq = 0; ids->seq = 0;
ids->next_id = -1; ids->next_id = -1;
{ init_rwsem(&ids->rwsem);
int seq_limit = INT_MAX/SEQ_MULTIPLIER;
if (seq_limit > USHRT_MAX)
ids->seq_max = USHRT_MAX;
else
ids->seq_max = seq_limit;
}
idr_init(&ids->ipcs_idr); idr_init(&ids->ipcs_idr);
} }
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
static const struct file_operations sysvipc_proc_fops; static const struct file_operations sysvipc_proc_fops;
/** /**
* ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface. * ipc_init_proc_interface - create a proc interface for sysipc types using a seq_file interface.
* @path: Path in procfs * @path: Path in procfs
* @header: Banner to be printed at the beginning of the file. * @header: Banner to be printed at the beginning of the file.
* @ids: ipc id table to iterate. * @ids: ipc id table to iterate.
* @show: show routine. * @show: show routine.
*/ */
void __init ipc_init_proc_interface(const char *path, const char *header, void __init ipc_init_proc_interface(const char *path, const char *header,
int ids, int (*show)(struct seq_file *, void *)) int ids, int (*show)(struct seq_file *, void *))
...@@ -184,23 +174,21 @@ void __init ipc_init_proc_interface(const char *path, const char *header, ...@@ -184,23 +174,21 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
NULL, /* parent dir */ NULL, /* parent dir */
&sysvipc_proc_fops, &sysvipc_proc_fops,
iface); iface);
if (!pde) { if (!pde)
kfree(iface); kfree(iface);
}
} }
#endif #endif
/** /**
* ipc_findkey - find a key in an ipc identifier set * ipc_findkey - find a key in an ipc identifier set
* @ids: Identifier set * @ids: ipc identifier set
* @key: The key to find * @key: key to find
* *
* Requires ipc_ids.rwsem locked. * Returns the locked pointer to the ipc structure if found or NULL
* Returns the LOCKED pointer to the ipc structure if found or NULL * otherwise. If key is found ipc points to the owning ipc structure
* if not. *
* If key is found ipc points to the owning ipc structure * Called with ipc_ids.rwsem held.
*/ */
static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
{ {
struct kern_ipc_perm *ipc; struct kern_ipc_perm *ipc;
...@@ -227,12 +215,11 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) ...@@ -227,12 +215,11 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
} }
/** /**
* ipc_get_maxid - get the last assigned id * ipc_get_maxid - get the last assigned id
* @ids: IPC identifier set * @ids: ipc identifier set
* *
* Called with ipc_ids.rwsem held. * Called with ipc_ids.rwsem held.
*/ */
int ipc_get_maxid(struct ipc_ids *ids) int ipc_get_maxid(struct ipc_ids *ids)
{ {
struct kern_ipc_perm *ipc; struct kern_ipc_perm *ipc;
...@@ -258,19 +245,19 @@ int ipc_get_maxid(struct ipc_ids *ids) ...@@ -258,19 +245,19 @@ int ipc_get_maxid(struct ipc_ids *ids)
} }
/** /**
* ipc_addid - add an IPC identifier * ipc_addid - add an ipc identifier
* @ids: IPC identifier set * @ids: ipc identifier set
* @new: new IPC permission set * @new: new ipc permission set
* @size: limit for the number of used ids * @size: limit for the number of used ids
* *
* Add an entry 'new' to the IPC ids idr. The permissions object is * Add an entry 'new' to the ipc ids idr. The permissions object is
* initialised and the first free entry is set up and the id assigned * initialised and the first free entry is set up and the id assigned
* is returned. The 'new' entry is returned in a locked state on success. * is returned. The 'new' entry is returned in a locked state on success.
* On failure the entry is not locked and a negative err-code is returned. * On failure the entry is not locked and a negative err-code is returned.
* *
* Called with writer ipc_ids.rwsem held. * Called with writer ipc_ids.rwsem held.
*/ */
int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{ {
kuid_t euid; kuid_t euid;
kgid_t egid; kgid_t egid;
...@@ -286,7 +273,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) ...@@ -286,7 +273,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
idr_preload(GFP_KERNEL); idr_preload(GFP_KERNEL);
spin_lock_init(&new->lock); spin_lock_init(&new->lock);
new->deleted = 0; new->deleted = false;
rcu_read_lock(); rcu_read_lock();
spin_lock(&new->lock); spin_lock(&new->lock);
...@@ -308,7 +295,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) ...@@ -308,7 +295,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
if (next_id < 0) { if (next_id < 0) {
new->seq = ids->seq++; new->seq = ids->seq++;
if (ids->seq > ids->seq_max) if (ids->seq > IPCID_SEQ_MAX)
ids->seq = 0; ids->seq = 0;
} else { } else {
new->seq = ipcid_to_seqx(next_id); new->seq = ipcid_to_seqx(next_id);
...@@ -320,14 +307,14 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) ...@@ -320,14 +307,14 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
} }
/** /**
* ipcget_new - create a new ipc object * ipcget_new - create a new ipc object
* @ns: namespace * @ns: ipc namespace
* @ids: IPC identifer set * @ids: ipc identifer set
* @ops: the actual creation routine to call * @ops: the actual creation routine to call
* @params: its parameters * @params: its parameters
* *
* This routine is called by sys_msgget, sys_semget() and sys_shmget() * This routine is called by sys_msgget, sys_semget() and sys_shmget()
* when the key is IPC_PRIVATE. * when the key is IPC_PRIVATE.
*/ */
static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids, static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params) struct ipc_ops *ops, struct ipc_params *params)
...@@ -341,19 +328,19 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids, ...@@ -341,19 +328,19 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
} }
/** /**
* ipc_check_perms - check security and permissions for an IPC * ipc_check_perms - check security and permissions for an ipc object
* @ns: IPC namespace * @ns: ipc namespace
* @ipcp: ipc permission set * @ipcp: ipc permission set
* @ops: the actual security routine to call * @ops: the actual security routine to call
* @params: its parameters * @params: its parameters
* *
* This routine is called by sys_msgget(), sys_semget() and sys_shmget() * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
* when the key is not IPC_PRIVATE and that key already exists in the * when the key is not IPC_PRIVATE and that key already exists in the
* ids IDR. * ds IDR.
* *
* On success, the IPC id is returned. * On success, the ipc id is returned.
* *
* It is called with ipc_ids.rwsem and ipcp->lock held. * It is called with ipc_ids.rwsem and ipcp->lock held.
*/ */
static int ipc_check_perms(struct ipc_namespace *ns, static int ipc_check_perms(struct ipc_namespace *ns,
struct kern_ipc_perm *ipcp, struct kern_ipc_perm *ipcp,
...@@ -374,18 +361,18 @@ static int ipc_check_perms(struct ipc_namespace *ns, ...@@ -374,18 +361,18 @@ static int ipc_check_perms(struct ipc_namespace *ns,
} }
/** /**
* ipcget_public - get an ipc object or create a new one * ipcget_public - get an ipc object or create a new one
* @ns: namespace * @ns: ipc namespace
* @ids: IPC identifer set * @ids: ipc identifer set
* @ops: the actual creation routine to call * @ops: the actual creation routine to call
* @params: its parameters * @params: its parameters
* *
* This routine is called by sys_msgget, sys_semget() and sys_shmget() * This routine is called by sys_msgget, sys_semget() and sys_shmget()
* when the key is not IPC_PRIVATE. * when the key is not IPC_PRIVATE.
* It adds a new entry if the key is not found and does some permission * It adds a new entry if the key is not found and does some permission
* / security checkings if the key is found. * / security checkings if the key is found.
* *
* On success, the ipc id is returned. * On success, the ipc id is returned.
*/ */
static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids, static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params) struct ipc_ops *ops, struct ipc_params *params)
...@@ -431,39 +418,33 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids, ...@@ -431,39 +418,33 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
/** /**
* ipc_rmid - remove an IPC identifier * ipc_rmid - remove an ipc identifier
* @ids: IPC identifier set * @ids: ipc identifier set
* @ipcp: ipc perm structure containing the identifier to remove * @ipcp: ipc perm structure containing the identifier to remove
* *
* ipc_ids.rwsem (as a writer) and the spinlock for this ID are held * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
* before this function is called, and remain locked on the exit. * before this function is called, and remain locked on the exit.
*/ */
void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{ {
int lid = ipcid_to_idx(ipcp->id); int lid = ipcid_to_idx(ipcp->id);
idr_remove(&ids->ipcs_idr, lid); idr_remove(&ids->ipcs_idr, lid);
ids->in_use--; ids->in_use--;
ipcp->deleted = true;
ipcp->deleted = 1;
return;
} }
/** /**
* ipc_alloc - allocate ipc space * ipc_alloc - allocate ipc space
* @size: size desired * @size: size desired
* *
* Allocate memory from the appropriate pools and return a pointer to it. * Allocate memory from the appropriate pools and return a pointer to it.
* NULL is returned if the allocation fails * NULL is returned if the allocation fails
*/ */
void *ipc_alloc(int size) void *ipc_alloc(int size)
{ {
void *out; void *out;
if(size > PAGE_SIZE) if (size > PAGE_SIZE)
out = vmalloc(size); out = vmalloc(size);
else else
out = kmalloc(size, GFP_KERNEL); out = kmalloc(size, GFP_KERNEL);
...@@ -471,28 +452,27 @@ void *ipc_alloc(int size) ...@@ -471,28 +452,27 @@ void *ipc_alloc(int size)
} }
/** /**
* ipc_free - free ipc space * ipc_free - free ipc space
* @ptr: pointer returned by ipc_alloc * @ptr: pointer returned by ipc_alloc
* @size: size of block * @size: size of block
* *
* Free a block created with ipc_alloc(). The caller must know the size * Free a block created with ipc_alloc(). The caller must know the size
* used in the allocation call. * used in the allocation call.
*/ */
void ipc_free(void *ptr, int size)
void ipc_free(void* ptr, int size)
{ {
if(size > PAGE_SIZE) if (size > PAGE_SIZE)
vfree(ptr); vfree(ptr);
else else
kfree(ptr); kfree(ptr);
} }
/** /**
* ipc_rcu_alloc - allocate ipc and rcu space * ipc_rcu_alloc - allocate ipc and rcu space
* @size: size desired * @size: size desired
* *
* Allocate memory for the rcu header structure + the object. * Allocate memory for the rcu header structure + the object.
* Returns the pointer to the object or NULL upon failure. * Returns the pointer to the object or NULL upon failure.
*/ */
void *ipc_rcu_alloc(int size) void *ipc_rcu_alloc(int size)
{ {
...@@ -534,17 +514,16 @@ void ipc_rcu_free(struct rcu_head *head) ...@@ -534,17 +514,16 @@ void ipc_rcu_free(struct rcu_head *head)
} }
/** /**
* ipcperms - check IPC permissions * ipcperms - check ipc permissions
* @ns: IPC namespace * @ns: ipc namespace
* @ipcp: IPC permission set * @ipcp: ipc permission set
* @flag: desired permission set. * @flag: desired permission set
* *
* Check user, group, other permissions for access * Check user, group, other permissions for access
* to ipc resources. return 0 if allowed * to ipc resources. return 0 if allowed
* *
* @flag will most probably be 0 or S_...UGO from <linux/stat.h> * @flag will most probably be 0 or S_...UGO from <linux/stat.h>
*/ */
int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
{ {
kuid_t euid = current_euid(); kuid_t euid = current_euid();
...@@ -572,16 +551,14 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) ...@@ -572,16 +551,14 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
*/ */
/** /**
* kernel_to_ipc64_perm - convert kernel ipc permissions to user * kernel_to_ipc64_perm - convert kernel ipc permissions to user
* @in: kernel permissions * @in: kernel permissions
* @out: new style IPC permissions * @out: new style ipc permissions
* *
* Turn the kernel object @in into a set of permissions descriptions * Turn the kernel object @in into a set of permissions descriptions
* for returning to userspace (@out). * for returning to userspace (@out).
*/ */
void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out)
void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
{ {
out->key = in->key; out->key = in->key;
out->uid = from_kuid_munged(current_user_ns(), in->uid); out->uid = from_kuid_munged(current_user_ns(), in->uid);
...@@ -593,15 +570,14 @@ void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out) ...@@ -593,15 +570,14 @@ void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
} }
/** /**
* ipc64_perm_to_ipc_perm - convert new ipc permissions to old * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
* @in: new style IPC permissions * @in: new style ipc permissions
* @out: old style IPC permissions * @out: old style ipc permissions
* *
* Turn the new style permissions object @in into a compatibility * Turn the new style permissions object @in into a compatibility
* object and store it into the @out pointer. * object and store it into the @out pointer.
*/ */
void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
{ {
out->key = in->key; out->key = in->key;
SET_UID(out->uid, in->uid); SET_UID(out->uid, in->uid);
...@@ -635,8 +611,8 @@ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id) ...@@ -635,8 +611,8 @@ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
} }
/** /**
* ipc_lock - Lock an ipc structure without rwsem held * ipc_lock - lock an ipc structure without rwsem held
* @ids: IPC identifier set * @ids: ipc identifier set
* @id: ipc id to look for * @id: ipc id to look for
* *
* Look for an id in the ipc ids idr and lock the associated ipc object. * Look for an id in the ipc ids idr and lock the associated ipc object.
...@@ -657,7 +633,7 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id) ...@@ -657,7 +633,7 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
/* ipc_rmid() may have already freed the ID while ipc_lock /* ipc_rmid() may have already freed the ID while ipc_lock
* was spinning: here verify that the structure is still valid * was spinning: here verify that the structure is still valid
*/ */
if (!out->deleted) if (ipc_valid_object(out))
return out; return out;
spin_unlock(&out->lock); spin_unlock(&out->lock);
...@@ -693,11 +669,11 @@ struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id) ...@@ -693,11 +669,11 @@ struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
/** /**
* ipcget - Common sys_*get() code * ipcget - Common sys_*get() code
* @ns : namsepace * @ns: namsepace
* @ids : IPC identifier set * @ids: ipc identifier set
* @ops : operations to be called on ipc object creation, permission checks * @ops: operations to be called on ipc object creation, permission checks
* and further checks * and further checks
* @params : the parameters needed by the previous operations. * @params: the parameters needed by the previous operations.
* *
* Common routine called by sys_msgget(), sys_semget() and sys_shmget(). * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
*/ */
...@@ -711,7 +687,7 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, ...@@ -711,7 +687,7 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
} }
/** /**
* ipc_update_perm - update the permissions of an IPC. * ipc_update_perm - update the permissions of an ipc object
* @in: the permission given as input. * @in: the permission given as input.
* @out: the permission of the ipc to set. * @out: the permission of the ipc to set.
*/ */
...@@ -732,7 +708,7 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out) ...@@ -732,7 +708,7 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
/** /**
* ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
* @ns: the ipc namespace * @ns: ipc namespace
* @ids: the table of ids where to look for the ipc * @ids: the table of ids where to look for the ipc
* @id: the id of the ipc to retrieve * @id: the id of the ipc to retrieve
* @cmd: the cmd to check * @cmd: the cmd to check
...@@ -779,15 +755,14 @@ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns, ...@@ -779,15 +755,14 @@ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
/** /**
* ipc_parse_version - IPC call version * ipc_parse_version - ipc call version
* @cmd: pointer to command * @cmd: pointer to command
* *
* Return IPC_64 for new style IPC and IPC_OLD for old style IPC. * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
* The @cmd value is turned from an encoding command and version into * The @cmd value is turned from an encoding command and version into
* just the command code. * just the command code.
*/ */
int ipc_parse_version(int *cmd)
int ipc_parse_version (int *cmd)
{ {
if (*cmd & IPC_64) { if (*cmd & IPC_64) {
*cmd ^= IPC_64; *cmd ^= IPC_64;
...@@ -824,7 +799,7 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos, ...@@ -824,7 +799,7 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
if (total >= ids->in_use) if (total >= ids->in_use)
return NULL; return NULL;
for ( ; pos < IPCMNI; pos++) { for (; pos < IPCMNI; pos++) {
ipc = idr_find(&ids->ipcs_idr, pos); ipc = idr_find(&ids->ipcs_idr, pos);
if (ipc != NULL) { if (ipc != NULL) {
*new_pos = pos + 1; *new_pos = pos + 1;
...@@ -927,8 +902,10 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file) ...@@ -927,8 +902,10 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file)
goto out; goto out;
ret = seq_open(file, &sysvipc_proc_seqops); ret = seq_open(file, &sysvipc_proc_seqops);
if (ret) if (ret) {
goto out_kfree; kfree(iter);
goto out;
}
seq = file->private_data; seq = file->private_data;
seq->private = iter; seq->private = iter;
...@@ -937,9 +914,6 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file) ...@@ -937,9 +914,6 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file)
iter->ns = get_ipc_ns(current->nsproxy->ipc_ns); iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
out: out:
return ret; return ret;
out_kfree:
kfree(iter);
goto out;
} }
static int sysvipc_proc_release(struct inode *inode, struct file *file) static int sysvipc_proc_release(struct inode *inode, struct file *file)
......
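Editor's note: with the per-ids seq_max field gone, ipc_addid() above wraps the sequence counter against the shared IPCID_SEQ_MAX constant, and the returned id still encodes sequence and idr slot via SEQ_MULTIPLIER. The userspace sketch below mirrors that id layout under the assumption IPCMNI == 32768; make_id() and the global seq are illustrative names, not kernel code.

#include <stdio.h>
#include <limits.h>

#define IPCMNI          32768
#define SEQ_MULTIPLIER  IPCMNI
#define IPCID_SEQ_MAX   ((INT_MAX / SEQ_MULTIPLIER) < USHRT_MAX ? \
			 (INT_MAX / SEQ_MULTIPLIER) : USHRT_MAX)

static unsigned int seq;        /* stand-in for ids->seq */

/* Hand out an id for idr slot 'lid', bumping the sequence counter the way
 * ipc_addid() does after this patch: compare against the IPCID_SEQ_MAX
 * constant instead of a per-ids seq_max field.
 */
static int make_id(int lid)
{
	unsigned int s = seq++;

	if (seq > IPCID_SEQ_MAX)
		seq = 0;
	return s * SEQ_MULTIPLIER + lid;    /* undone by ipcid_to_idx()/ipcid_to_seqx() */
}

int main(void)
{
	int id = make_id(42);

	printf("id=%d idx=%d seqx=%d\n",
	       id, id % SEQ_MULTIPLIER, id / SEQ_MULTIPLIER);
	return 0;
}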
...@@ -15,9 +15,9 @@ ...@@ -15,9 +15,9 @@
#define SEQ_MULTIPLIER (IPCMNI) #define SEQ_MULTIPLIER (IPCMNI)
void sem_init (void); void sem_init(void);
void msg_init (void); void msg_init(void);
void shm_init (void); void shm_init(void);
struct ipc_namespace; struct ipc_namespace;
...@@ -100,6 +100,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header, ...@@ -100,6 +100,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
#define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER) #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
#define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER) #define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
#define IPCID_SEQ_MAX min_t(int, INT_MAX/SEQ_MULTIPLIER, USHRT_MAX)
/* must be called with ids->rwsem acquired for writing */ /* must be called with ids->rwsem acquired for writing */
int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int); int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
...@@ -116,8 +117,8 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg); ...@@ -116,8 +117,8 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);
/* for rare, potentially huge allocations. /* for rare, potentially huge allocations.
* both function can sleep * both function can sleep
*/ */
void* ipc_alloc(int size); void *ipc_alloc(int size);
void ipc_free(void* ptr, int size); void ipc_free(void *ptr, int size);
/* /*
* For allocation that need to be freed by RCU. * For allocation that need to be freed by RCU.
...@@ -125,7 +126,7 @@ void ipc_free(void* ptr, int size); ...@@ -125,7 +126,7 @@ void ipc_free(void* ptr, int size);
* getref increases the refcount, the putref call that reduces the recount * getref increases the refcount, the putref call that reduces the recount
* to 0 schedules the rcu destruction. Caller must guarantee locking. * to 0 schedules the rcu destruction. Caller must guarantee locking.
*/ */
void* ipc_rcu_alloc(int size); void *ipc_rcu_alloc(int size);
int ipc_rcu_getref(void *ptr); int ipc_rcu_getref(void *ptr);
void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)); void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
void ipc_rcu_free(struct rcu_head *head); void ipc_rcu_free(struct rcu_head *head);
...@@ -144,7 +145,7 @@ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns, ...@@ -144,7 +145,7 @@ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
/* On IA-64, we always use the "64-bit version" of the IPC structures. */ /* On IA-64, we always use the "64-bit version" of the IPC structures. */
# define ipc_parse_version(cmd) IPC_64 # define ipc_parse_version(cmd) IPC_64
#else #else
int ipc_parse_version (int *cmd); int ipc_parse_version(int *cmd);
#endif #endif
extern void free_msg(struct msg_msg *msg); extern void free_msg(struct msg_msg *msg);
...@@ -185,6 +186,19 @@ static inline void ipc_unlock(struct kern_ipc_perm *perm) ...@@ -185,6 +186,19 @@ static inline void ipc_unlock(struct kern_ipc_perm *perm)
rcu_read_unlock(); rcu_read_unlock();
} }
/*
* ipc_valid_object() - helper to sort out IPC_RMID races for codepaths
* where the respective ipc_ids.rwsem is not being held down.
* Checks whether the ipc object is still around or if it's gone already, as
* ipc_rmid() may have already freed the ID while the ipc lock was spinning.
* Needs to be called with kern_ipc_perm.lock held -- exception made for one
* checkpoint case at sys_semtimedop() as noted in code commentary.
*/
static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
{
return !perm->deleted;
}
struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id); struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params); struct ipc_ops *ops, struct ipc_params *params);
......
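Editor's note: ipc_valid_object() above only reads !perm->deleted, but the point of the helper is the calling convention: take kern_ipc_perm.lock first, then use the check to detect an IPC_RMID that raced in, and return -EIDRM. The userspace mock below sketches that pattern; struct ipc_perm_mock, ipc_valid_object_mock() and use_object() are invented names, with a pthread mutex standing in for the kernel spinlock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

struct ipc_perm_mock {
	pthread_mutex_t lock;
	bool deleted;            /* set by the RMID path while holding ->lock */
};

static bool ipc_valid_object_mock(struct ipc_perm_mock *perm)
{
	return !perm->deleted;   /* same test the kernel helper performs */
}

/* Pattern used by shmctl()/semtimedop() in the hunks above: lock the object,
 * then bail out with -EIDRM if it was removed before we got the lock.
 */
static int use_object(struct ipc_perm_mock *perm)
{
	int err = 0;

	pthread_mutex_lock(&perm->lock);
	if (!ipc_valid_object_mock(perm)) {
		err = -EIDRM;            /* object disappeared under us */
		goto out_unlock;
	}
	/* ... operate on the still-valid object here ... */
out_unlock:
	pthread_mutex_unlock(&perm->lock);
	return err;
}

int main(void)
{
	struct ipc_perm_mock p = { PTHREAD_MUTEX_INITIALIZER, false };

	printf("%d\n", use_object(&p));  /* 0 */
	p.deleted = true;
	printf("%d\n", use_object(&p));  /* -EIDRM */
	return 0;
}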
...@@ -1537,7 +1537,7 @@ void vmcoreinfo_append_str(const char *fmt, ...) ...@@ -1537,7 +1537,7 @@ void vmcoreinfo_append_str(const char *fmt, ...)
size_t r; size_t r;
va_start(args, fmt); va_start(args, fmt);
r = vsnprintf(buf, sizeof(buf), fmt, args); r = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args); va_end(args);
r = min(r, vmcoreinfo_max_size - vmcoreinfo_size); r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
......
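Editor's note: the switch to vscnprintf() matters because its return value is the number of characters actually placed in buf, whereas vsnprintf() returns the length the output would have had, which can exceed the buffer and corrupt the later size accounting. The kernel-only scnprintf family has no libc twin, so the userspace sketch below reproduces the clamp by hand around plain snprintf(); all names are illustrative.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16];
	char dest[64];
	size_t used = 0;

	/* snprintf() reports the would-be length, which here exceeds sizeof(buf). */
	int want = snprintf(buf, sizeof(buf), "%s", "a string longer than the buffer");

	/* Clamp to what was really written -- this is the value vscnprintf() returns. */
	size_t wrote = (want < 0) ? 0 :
		       ((size_t)want >= sizeof(buf) ? sizeof(buf) - 1 : (size_t)want);

	memcpy(dest + used, buf, wrote);   /* safe: copies only what is in buf */
	used += wrote;
	dest[used] = '\0';

	printf("want=%d wrote=%zu dest=\"%s\"\n", want, wrote, dest);
	return 0;
}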
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
* Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903) * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
*/ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h> #include <linux/export.h>
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
...@@ -54,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp ...@@ -54,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
DEFINE_PER_CPU(struct task_struct *, ksoftirqd); DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
char *softirq_to_name[NR_SOFTIRQS] = { const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
"TASKLET", "SCHED", "HRTIMER", "RCU" "TASKLET", "SCHED", "HRTIMER", "RCU"
}; };
...@@ -136,7 +138,6 @@ void _local_bh_enable(void) ...@@ -136,7 +138,6 @@ void _local_bh_enable(void)
WARN_ON_ONCE(in_irq()); WARN_ON_ONCE(in_irq());
__local_bh_enable(SOFTIRQ_DISABLE_OFFSET); __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
} }
EXPORT_SYMBOL(_local_bh_enable); EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
...@@ -153,7 +154,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) ...@@ -153,7 +154,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
/* /*
* Keep preemption disabled until we are done with * Keep preemption disabled until we are done with
* softirq processing: * softirq processing:
*/ */
preempt_count_sub(cnt - 1); preempt_count_sub(cnt - 1);
if (unlikely(!in_interrupt() && local_softirq_pending())) { if (unlikely(!in_interrupt() && local_softirq_pending())) {
...@@ -229,6 +230,7 @@ asmlinkage void __do_softirq(void) ...@@ -229,6 +230,7 @@ asmlinkage void __do_softirq(void)
struct softirq_action *h; struct softirq_action *h;
bool in_hardirq; bool in_hardirq;
__u32 pending; __u32 pending;
int softirq_bit;
int cpu; int cpu;
/* /*
...@@ -253,30 +255,30 @@ asmlinkage void __do_softirq(void) ...@@ -253,30 +255,30 @@ asmlinkage void __do_softirq(void)
h = softirq_vec; h = softirq_vec;
do { while ((softirq_bit = ffs(pending))) {
if (pending & 1) { unsigned int vec_nr;
unsigned int vec_nr = h - softirq_vec; int prev_count;
int prev_count = preempt_count();
kstat_incr_softirqs_this_cpu(vec_nr);
trace_softirq_entry(vec_nr);
h->action(h);
trace_softirq_exit(vec_nr);
if (unlikely(prev_count != preempt_count())) {
printk(KERN_ERR "huh, entered softirq %u %s %p"
"with preempt_count %08x,"
" exited with %08x?\n", vec_nr,
softirq_to_name[vec_nr], h->action,
prev_count, preempt_count());
preempt_count_set(prev_count);
}
rcu_bh_qs(cpu); h += softirq_bit - 1;
vec_nr = h - softirq_vec;
prev_count = preempt_count();
kstat_incr_softirqs_this_cpu(vec_nr);
trace_softirq_entry(vec_nr);
h->action(h);
trace_softirq_exit(vec_nr);
if (unlikely(prev_count != preempt_count())) {
pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
vec_nr, softirq_to_name[vec_nr], h->action,
prev_count, preempt_count());
preempt_count_set(prev_count);
} }
rcu_bh_qs(cpu);
h++; h++;
pending >>= 1; pending >>= softirq_bit;
} while (pending); }
local_irq_disable(); local_irq_disable();
...@@ -433,8 +435,7 @@ void open_softirq(int nr, void (*action)(struct softirq_action *)) ...@@ -433,8 +435,7 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
/* /*
* Tasklets * Tasklets
*/ */
struct tasklet_head struct tasklet_head {
{
struct tasklet_struct *head; struct tasklet_struct *head;
struct tasklet_struct **tail; struct tasklet_struct **tail;
}; };
...@@ -453,7 +454,6 @@ void __tasklet_schedule(struct tasklet_struct *t) ...@@ -453,7 +454,6 @@ void __tasklet_schedule(struct tasklet_struct *t)
raise_softirq_irqoff(TASKLET_SOFTIRQ); raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL(__tasklet_schedule); EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t) void __tasklet_hi_schedule(struct tasklet_struct *t)
...@@ -467,7 +467,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) ...@@ -467,7 +467,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
raise_softirq_irqoff(HI_SOFTIRQ); raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL(__tasklet_hi_schedule); EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t) void __tasklet_hi_schedule_first(struct tasklet_struct *t)
...@@ -478,7 +477,6 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) ...@@ -478,7 +477,6 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
__this_cpu_write(tasklet_hi_vec.head, t); __this_cpu_write(tasklet_hi_vec.head, t);
__raise_softirq_irqoff(HI_SOFTIRQ); __raise_softirq_irqoff(HI_SOFTIRQ);
} }
EXPORT_SYMBOL(__tasklet_hi_schedule_first); EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a) static void tasklet_action(struct softirq_action *a)
...@@ -498,7 +496,8 @@ static void tasklet_action(struct softirq_action *a) ...@@ -498,7 +496,8 @@ static void tasklet_action(struct softirq_action *a)
if (tasklet_trylock(t)) { if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) { if (!atomic_read(&t->count)) {
if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) if (!test_and_clear_bit(TASKLET_STATE_SCHED,
&t->state))
BUG(); BUG();
t->func(t->data); t->func(t->data);
tasklet_unlock(t); tasklet_unlock(t);
...@@ -533,7 +532,8 @@ static void tasklet_hi_action(struct softirq_action *a) ...@@ -533,7 +532,8 @@ static void tasklet_hi_action(struct softirq_action *a)
if (tasklet_trylock(t)) { if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) { if (!atomic_read(&t->count)) {
if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) if (!test_and_clear_bit(TASKLET_STATE_SCHED,
&t->state))
BUG(); BUG();
t->func(t->data); t->func(t->data);
tasklet_unlock(t); tasklet_unlock(t);
...@@ -551,7 +551,6 @@ static void tasklet_hi_action(struct softirq_action *a) ...@@ -551,7 +551,6 @@ static void tasklet_hi_action(struct softirq_action *a)
} }
} }
void tasklet_init(struct tasklet_struct *t, void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data) void (*func)(unsigned long), unsigned long data)
{ {
...@@ -561,13 +560,12 @@ void tasklet_init(struct tasklet_struct *t, ...@@ -561,13 +560,12 @@ void tasklet_init(struct tasklet_struct *t,
t->func = func; t->func = func;
t->data = data; t->data = data;
} }
EXPORT_SYMBOL(tasklet_init); EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t) void tasklet_kill(struct tasklet_struct *t)
{ {
if (in_interrupt()) if (in_interrupt())
printk("Attempt to kill tasklet from interrupt\n"); pr_notice("Attempt to kill tasklet from interrupt\n");
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
do { do {
...@@ -577,7 +575,6 @@ void tasklet_kill(struct tasklet_struct *t) ...@@ -577,7 +575,6 @@ void tasklet_kill(struct tasklet_struct *t)
tasklet_unlock_wait(t); tasklet_unlock_wait(t);
clear_bit(TASKLET_STATE_SCHED, &t->state); clear_bit(TASKLET_STATE_SCHED, &t->state);
} }
EXPORT_SYMBOL(tasklet_kill); EXPORT_SYMBOL(tasklet_kill);
/* /*
...@@ -727,9 +724,8 @@ static void takeover_tasklets(unsigned int cpu) ...@@ -727,9 +724,8 @@ static void takeover_tasklets(unsigned int cpu)
} }
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_HOTPLUG_CPU */
static int cpu_callback(struct notifier_block *nfb, static int cpu_callback(struct notifier_block *nfb, unsigned long action,
unsigned long action, void *hcpu)
void *hcpu)
{ {
switch (action) { switch (action) {
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
......
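Editor's note: the __do_softirq() rewrite above walks the pending bitmask with ffs(), so runs of clear bits are skipped in one step instead of being shifted out one at a time, and the handler pointer is advanced by "softirq_bit - 1" before each dispatch. The userspace sketch below reuses the same bookkeeping with the POSIX ffs() from <strings.h>; the softirq names are copied from the table above, everything else is illustrative.

#include <stdio.h>
#include <strings.h>    /* ffs() */

static const char * const names[] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

int main(void)
{
	unsigned int pending = (1u << 1) | (1u << 3) | (1u << 9);  /* TIMER, NET_RX, RCU */
	unsigned int base = 0;   /* index the handler pointer 'h' would be at */
	int bit;

	while ((bit = ffs(pending))) {
		base += bit - 1;                 /* h += softirq_bit - 1 in the patch */
		printf("run softirq %u (%s)\n", base, names[base]);
		base += 1;                       /* h++ */
		pending >>= bit;                 /* pending >>= softirq_bit */
	}
	return 0;
}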
...@@ -268,14 +268,12 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) ...@@ -268,14 +268,12 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
*/ */
static inline int parse_lineno(const char *str, unsigned int *val) static inline int parse_lineno(const char *str, unsigned int *val)
{ {
char *end = NULL;
BUG_ON(str == NULL); BUG_ON(str == NULL);
if (*str == '\0') { if (*str == '\0') {
*val = 0; *val = 0;
return 0; return 0;
} }
*val = simple_strtoul(str, &end, 10); if (kstrtouint(str, 10, val) < 0) {
if (end == NULL || end == str || *end != '\0') {
pr_err("bad line-number: %s\n", str); pr_err("bad line-number: %s\n", str);
return -EINVAL; return -EINVAL;
} }
...@@ -348,14 +346,14 @@ static int ddebug_parse_query(char *words[], int nwords, ...@@ -348,14 +346,14 @@ static int ddebug_parse_query(char *words[], int nwords,
} }
if (last) if (last)
*last++ = '\0'; *last++ = '\0';
if (parse_lineno(first, &query->first_lineno) < 0) { if (parse_lineno(first, &query->first_lineno) < 0)
pr_err("line-number is <0\n");
return -EINVAL; return -EINVAL;
}
if (last) { if (last) {
/* range <first>-<last> */ /* range <first>-<last> */
if (parse_lineno(last, &query->last_lineno) if (parse_lineno(last, &query->last_lineno) < 0)
< query->first_lineno) { return -EINVAL;
if (query->last_lineno < query->first_lineno) {
pr_err("last-line:%d < 1st-line:%d\n", pr_err("last-line:%d < 1st-line:%d\n",
query->last_lineno, query->last_lineno,
query->first_lineno); query->first_lineno);
......
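Editor's note: parse_lineno() now relies on kstrtouint(), which rejects trailing junk and overflow in a single call, instead of the hand-rolled simple_strtoul() end-pointer check; the caller then reports only "bad line-number". Userspace has no kstrtouint(), so the sketch below approximates the same strictness with strtoul() plus explicit checks; parse_lineno_like() is an invented name.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>

/* Whole string must be a valid base-10 unsigned int, otherwise -EINVAL,
 * matching what parse_lineno() does after this patch.
 */
static int parse_lineno_like(const char *str, unsigned int *val)
{
	char *end = NULL;
	unsigned long v;

	if (*str == '\0') {      /* the kernel helper keeps this empty-string case */
		*val = 0;
		return 0;
	}
	errno = 0;
	v = strtoul(str, &end, 10);
	if (errno || end == str || *end != '\0' || v > UINT_MAX) {
		fprintf(stderr, "bad line-number: %s\n", str);
		return -EINVAL;
	}
	*val = (unsigned int)v;
	return 0;
}

int main(void)
{
	unsigned int n;

	printf("%d\n", parse_lineno_like("123", &n));   /* 0, n == 123 */
	printf("%d\n", parse_lineno_like("12x", &n));   /* -EINVAL */
	return 0;
}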
...@@ -172,7 +172,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) ...@@ -172,7 +172,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
/* /*
* Get the overflow emergency buffer * Get the overflow emergency buffer
*/ */
v_overflow_buffer = memblock_virt_alloc_nopanic( v_overflow_buffer = memblock_virt_alloc_low_nopanic(
PAGE_ALIGN(io_tlb_overflow), PAGE_ALIGN(io_tlb_overflow),
PAGE_SIZE); PAGE_SIZE);
if (!v_overflow_buffer) if (!v_overflow_buffer)
...@@ -220,7 +220,7 @@ swiotlb_init(int verbose) ...@@ -220,7 +220,7 @@ swiotlb_init(int verbose)
bytes = io_tlb_nslabs << IO_TLB_SHIFT; bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/* Get IO TLB memory from the low pages */ /* Get IO TLB memory from the low pages */
vstart = memblock_virt_alloc_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE); vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
return; return;
......
...@@ -984,9 +984,6 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, ...@@ -984,9 +984,6 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
if (!align) if (!align)
align = SMP_CACHE_BYTES; align = SMP_CACHE_BYTES;
/* align @size to avoid excessive fragmentation on reserved array */
size = round_up(size, align);
found = memblock_find_in_range_node(size, align, 0, max_addr, nid); found = memblock_find_in_range_node(size, align, 0, max_addr, nid);
if (found && !memblock_reserve(found, size)) if (found && !memblock_reserve(found, size))
return found; return found;
...@@ -1080,9 +1077,6 @@ static void * __init memblock_virt_alloc_internal( ...@@ -1080,9 +1077,6 @@ static void * __init memblock_virt_alloc_internal(
if (!align) if (!align)
align = SMP_CACHE_BYTES; align = SMP_CACHE_BYTES;
/* align @size to avoid excessive fragmentation on reserved array */
size = round_up(size, align);
again: again:
alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
nid); nid);
......
...@@ -1548,8 +1548,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page, ...@@ -1548,8 +1548,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NORETRY |
__GFP_NOWARN) & __GFP_NOWARN) &
~GFP_IOFS, 0); ~GFP_IOFS, 0);
if (newpage)
page_cpupid_xchg_last(newpage, page_cpupid_last(page));
return newpage; return newpage;
} }
......
...@@ -202,4 +202,4 @@ static int __init mm_sysfs_init(void) ...@@ -202,4 +202,4 @@ static int __init mm_sysfs_init(void)
return 0; return 0;
} }
pure_initcall(mm_sysfs_init); postcore_initcall(mm_sysfs_init);
...@@ -220,12 +220,12 @@ int is_vmalloc_or_module_addr(const void *x) ...@@ -220,12 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
} }
/* /*
* Walk a vmap address to the physical pfn it maps to. * Walk a vmap address to the struct page it maps.
*/ */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr) struct page *vmalloc_to_page(const void *vmalloc_addr)
{ {
unsigned long addr = (unsigned long) vmalloc_addr; unsigned long addr = (unsigned long) vmalloc_addr;
unsigned long pfn = 0; struct page *page = NULL;
pgd_t *pgd = pgd_offset_k(addr); pgd_t *pgd = pgd_offset_k(addr);
/* /*
...@@ -244,23 +244,23 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr) ...@@ -244,23 +244,23 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
ptep = pte_offset_map(pmd, addr); ptep = pte_offset_map(pmd, addr);
pte = *ptep; pte = *ptep;
if (pte_present(pte)) if (pte_present(pte))
pfn = pte_pfn(pte); page = pte_page(pte);
pte_unmap(ptep); pte_unmap(ptep);
} }
} }
} }
return pfn; return page;
} }
EXPORT_SYMBOL(vmalloc_to_pfn); EXPORT_SYMBOL(vmalloc_to_page);
/* /*
* Map a vmalloc()-space virtual address to the struct page. * Map a vmalloc()-space virtual address to the physical page frame number.
*/ */
struct page *vmalloc_to_page(const void *vmalloc_addr) unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{ {
return pfn_to_page(vmalloc_to_pfn(vmalloc_addr)); return page_to_pfn(vmalloc_to_page(vmalloc_addr));
} }
EXPORT_SYMBOL(vmalloc_to_page); EXPORT_SYMBOL(vmalloc_to_pfn);
/*** Global kva allocator ***/ /*** Global kva allocator ***/
......
...@@ -2665,6 +2665,15 @@ sub process { ...@@ -2665,6 +2665,15 @@ sub process {
$herecurr); $herecurr);
} }
# check for function declarations without arguments like "int foo()"
if ($line =~ /(\b$Type\s+$Ident)\s*\(\s*\)/) {
if (ERROR("FUNCTION_WITHOUT_ARGS",
"Bad function definition - $1() should probably be $1(void)\n" . $herecurr) &&
$fix) {
$fixed[$linenr - 1] =~ s/(\b($Type)\s+($Ident))\s*\(\s*\)/$2 $3(void)/;
}
}
# check for uses of DEFINE_PCI_DEVICE_TABLE # check for uses of DEFINE_PCI_DEVICE_TABLE
if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) { if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
if (WARN("DEFINE_PCI_DEVICE_TABLE", if (WARN("DEFINE_PCI_DEVICE_TABLE",
......
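Editor's note: the new FUNCTION_WITHOUT_ARGS check targets declarations like "int foo()". In C before C23 an empty parameter list is not a prototype, so the compiler performs no argument checking on callers; "(void)" makes the no-argument intent enforceable. A short illustration, with made-up function names:

#include <stdio.h>

/* Pre-C23 C: an empty parameter list is NOT a prototype, so callers of this
 * function get no argument checking at all.
 */
int bad_open();

/* What checkpatch now asks for: an explicit (void) prototype. */
int good_open(void);

int bad_open() { return 1; }
int good_open(void) { return 2; }

int main(void)
{
	/* bad_open(42, "oops");   would compile silently against the first
	 *                         declaration -- exactly the hazard the new
	 *                         check flags.                                */
	/* good_open(42);          would be rejected: too many arguments.      */
	printf("%d %d\n", bad_open(), good_open());
	return 0;
}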