Commit 5f34fe1c authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (63 commits)
  stacktrace: provide save_stack_trace_tsk() weak alias
  rcu: provide RCU options on non-preempt architectures too
  printk: fix discarding message when recursion_bug
  futex: clean up futex_(un)lock_pi fault handling
  "Tree RCU": scalable classic RCU implementation
  futex: rename field in futex_q to clarify single waiter semantics
  x86/swiotlb: add default swiotlb_arch_range_needs_mapping
  x86/swiotlb: add default phys<->bus conversion
  x86: unify pci iommu setup and allow swiotlb to compile for 32 bit
  x86: add swiotlb allocation functions
  swiotlb: consolidate swiotlb info message printing
  swiotlb: support bouncing of HighMem pages
  swiotlb: factor out copy to/from device
  swiotlb: add arch hook to force mapping
  swiotlb: allow architectures to override phys<->bus<->phys conversions
  swiotlb: add comment where we handle the overflow of a dma mask on 32 bit
  rcu: fix rcutorture behavior during reboot
  resources: skip sanity check of busy resources
  swiotlb: move some definitions to header
  swiotlb: allow architectures to override swiotlb pool allocation
  ...

Fix up trivial conflicts in
  arch/x86/kernel/Makefile
  arch/x86/mm/init_32.c
  include/linux/hardirq.h
as per Ingo's suggestions.
parents eca1bf5b 6638101c
...@@ -16,6 +16,8 @@ RTFP.txt ...@@ -16,6 +16,8 @@ RTFP.txt
- List of RCU papers (bibliography) going back to 1980. - List of RCU papers (bibliography) going back to 1980.
torture.txt torture.txt
- RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST) - RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST)
trace.txt
- CONFIG_RCU_TRACE debugfs files and formats
UP.txt UP.txt
- RCU on Uniprocessor Systems - RCU on Uniprocessor Systems
whatisRCU.txt whatisRCU.txt
......
This diff is collapsed.
...@@ -71,35 +71,50 @@ Look at the current lock statistics: ...@@ -71,35 +71,50 @@ Look at the current lock statistics:
# less /proc/lock_stat # less /proc/lock_stat
01 lock_stat version 0.2 01 lock_stat version 0.3
02 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 02 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
03 class name con-bounces contentions waittime-min waittime-max waittime-total acq-bounces acquisitions holdtime-min holdtime-max holdtime-total 03 class name con-bounces contentions waittime-min waittime-max waittime-total acq-bounces acquisitions holdtime-min holdtime-max holdtime-total
04 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 04 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
05 05
06 &inode->i_data.tree_lock-W: 15 21657 0.18 1093295.30 11547131054.85 58 10415 0.16 87.51 6387.60 06 &mm->mmap_sem-W: 233 538 18446744073708 22924.27 607243.51 1342 45806 1.71 8595.89 1180582.34
07 &inode->i_data.tree_lock-R: 0 0 0.00 0.00 0.00 23302 231198 0.25 8.45 98023.38 07 &mm->mmap_sem-R: 205 587 18446744073708 28403.36 731975.00 1940 412426 0.58 187825.45 6307502.88
08 -------------------------- 08 ---------------
09 &inode->i_data.tree_lock 0 [<ffffffff8027c08f>] add_to_page_cache+0x5f/0x190 09 &mm->mmap_sem 487 [<ffffffff8053491f>] do_page_fault+0x466/0x928
10 10 &mm->mmap_sem 179 [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
11 ............................................................................................................................................................................................... 11 &mm->mmap_sem 279 [<ffffffff80210a57>] sys_mmap+0x75/0xce
12 12 &mm->mmap_sem 76 [<ffffffff802a490b>] sys_munmap+0x32/0x59
13 dcache_lock: 1037 1161 0.38 45.32 774.51 6611 243371 0.15 306.48 77387.24 13 ---------------
14 ----------- 14 &mm->mmap_sem 270 [<ffffffff80210a57>] sys_mmap+0x75/0xce
15 dcache_lock 180 [<ffffffff802c0d7e>] sys_getcwd+0x11e/0x230 15 &mm->mmap_sem 431 [<ffffffff8053491f>] do_page_fault+0x466/0x928
16 dcache_lock 165 [<ffffffff802c002a>] d_alloc+0x15a/0x210 16 &mm->mmap_sem 138 [<ffffffff802a490b>] sys_munmap+0x32/0x59
17 dcache_lock 33 [<ffffffff8035818d>] _atomic_dec_and_lock+0x4d/0x70 17 &mm->mmap_sem 145 [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
18 dcache_lock 1 [<ffffffff802beef8>] shrink_dcache_parent+0x18/0x130 18
19 ...............................................................................................................................................................................................
20
21 dcache_lock: 621 623 0.52 118.26 1053.02 6745 91930 0.29 316.29 118423.41
22 -----------
23 dcache_lock 179 [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
24 dcache_lock 113 [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
25 dcache_lock 99 [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
26 dcache_lock 104 [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
27 -----------
28 dcache_lock 192 [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
29 dcache_lock 98 [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
30 dcache_lock 72 [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
31 dcache_lock 112 [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
This excerpt shows the first two lock class statistics. Line 01 shows the This excerpt shows the first two lock class statistics. Line 01 shows the
output version - each time the format changes this will be updated. Line 02-04 output version - each time the format changes this will be updated. Line 02-04
show the header with column descriptions. Lines 05-10 and 13-18 show the actual show the header with column descriptions. Lines 05-18 and 20-31 show the actual
statistics. These statistics come in two parts; the actual stats separated by a statistics. These statistics come in two parts; the actual stats separated by a
short separator (line 08, 14) from the contention points. short separator (line 08, 13) from the contention points.
The first lock (05-10) is a read/write lock, and shows two lines above the The first lock (05-18) is a read/write lock, and shows two lines above the
short separator. The contention points don't match the column descriptors, short separator. The contention points don't match the column descriptors,
they have two: contentions and [<IP>] symbol. they have two: contentions and [<IP>] symbol. The second set of contention
points are the points we're contending with.
The integer part of the time values is in us.
View the top contending locks: View the top contending locks:
......
...@@ -208,6 +208,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal) ...@@ -208,6 +208,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
break; break;
case ERR_TYPE_KERNEL_PANIC: case ERR_TYPE_KERNEL_PANIC:
default: default:
WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s); spin_unlock_irqrestore(&rtasd_log_lock, s);
return; return;
} }
...@@ -227,6 +228,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal) ...@@ -227,6 +228,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
/* Check to see if we need to or have stopped logging */ /* Check to see if we need to or have stopped logging */
if (fatal || !logging_enabled) { if (fatal || !logging_enabled) {
logging_enabled = 0; logging_enabled = 0;
WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s); spin_unlock_irqrestore(&rtasd_log_lock, s);
return; return;
} }
...@@ -249,11 +251,13 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal) ...@@ -249,11 +251,13 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
else else
rtas_log_start += 1; rtas_log_start += 1;
WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s); spin_unlock_irqrestore(&rtasd_log_lock, s);
wake_up_interruptible(&rtas_log_wait); wake_up_interruptible(&rtas_log_wait);
break; break;
case ERR_TYPE_KERNEL_PANIC: case ERR_TYPE_KERNEL_PANIC:
default: default:
WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s); spin_unlock_irqrestore(&rtasd_log_lock, s);
return; return;
} }
......
...@@ -11,21 +11,21 @@ extern int get_signals(void); ...@@ -11,21 +11,21 @@ extern int get_signals(void);
extern void block_signals(void); extern void block_signals(void);
extern void unblock_signals(void); extern void unblock_signals(void);
#define local_save_flags(flags) do { typecheck(unsigned long, flags); \ #define raw_local_save_flags(flags) do { typecheck(unsigned long, flags); \
(flags) = get_signals(); } while(0) (flags) = get_signals(); } while(0)
#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \ #define raw_local_irq_restore(flags) do { typecheck(unsigned long, flags); \
set_signals(flags); } while(0) set_signals(flags); } while(0)
#define local_irq_save(flags) do { local_save_flags(flags); \ #define raw_local_irq_save(flags) do { raw_local_save_flags(flags); \
local_irq_disable(); } while(0) raw_local_irq_disable(); } while(0)
#define local_irq_enable() unblock_signals() #define raw_local_irq_enable() unblock_signals()
#define local_irq_disable() block_signals() #define raw_local_irq_disable() block_signals()
#define irqs_disabled() \ #define irqs_disabled() \
({ \ ({ \
unsigned long flags; \ unsigned long flags; \
local_save_flags(flags); \ raw_local_save_flags(flags); \
(flags == 0); \ (flags == 0); \
}) })
......
...@@ -65,7 +65,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) ...@@ -65,7 +65,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
return dma_ops; return dma_ops;
else else
return dev->archdata.dma_ops; return dev->archdata.dma_ops;
#endif /* _ASM_X86_DMA_MAPPING_H */ #endif
} }
/* Make sure we keep the same behaviour */ /* Make sure we keep the same behaviour */
......
...@@ -7,8 +7,6 @@ extern struct dma_mapping_ops nommu_dma_ops; ...@@ -7,8 +7,6 @@ extern struct dma_mapping_ops nommu_dma_ops;
extern int force_iommu, no_iommu; extern int force_iommu, no_iommu;
extern int iommu_detected; extern int iommu_detected;
extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
/* 10 seconds */ /* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
......
...@@ -84,6 +84,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, ...@@ -84,6 +84,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
static inline void early_quirks(void) { } static inline void early_quirks(void) { }
#endif #endif
extern void pci_iommu_alloc(void);
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
......
...@@ -23,7 +23,6 @@ extern int (*pci_config_write)(int seg, int bus, int dev, int fn, ...@@ -23,7 +23,6 @@ extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
int reg, int len, u32 value); int reg, int len, u32 value);
extern void dma32_reserve_bootmem(void); extern void dma32_reserve_bootmem(void);
extern void pci_iommu_alloc(void);
/* The PCI address space does equal the physical memory /* The PCI address space does equal the physical memory
* address space. The networking and block device layers use * address space. The networking and block device layers use
......
...@@ -157,6 +157,7 @@ extern int __get_user_bad(void); ...@@ -157,6 +157,7 @@ extern int __get_user_bad(void);
int __ret_gu; \ int __ret_gu; \
unsigned long __val_gu; \ unsigned long __val_gu; \
__chk_user_ptr(ptr); \ __chk_user_ptr(ptr); \
might_fault(); \
switch (sizeof(*(ptr))) { \ switch (sizeof(*(ptr))) { \
case 1: \ case 1: \
__get_user_x(1, __ret_gu, __val_gu, ptr); \ __get_user_x(1, __ret_gu, __val_gu, ptr); \
...@@ -241,6 +242,7 @@ extern void __put_user_8(void); ...@@ -241,6 +242,7 @@ extern void __put_user_8(void);
int __ret_pu; \ int __ret_pu; \
__typeof__(*(ptr)) __pu_val; \ __typeof__(*(ptr)) __pu_val; \
__chk_user_ptr(ptr); \ __chk_user_ptr(ptr); \
might_fault(); \
__pu_val = x; \ __pu_val = x; \
switch (sizeof(*(ptr))) { \ switch (sizeof(*(ptr))) { \
case 1: \ case 1: \
......
...@@ -82,8 +82,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) ...@@ -82,8 +82,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
static __always_inline unsigned long __must_check static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n) __copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
might_sleep(); might_fault();
return __copy_to_user_inatomic(to, from, n); return __copy_to_user_inatomic(to, from, n);
} }
static __always_inline unsigned long static __always_inline unsigned long
...@@ -137,7 +137,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) ...@@ -137,7 +137,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
static __always_inline unsigned long static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n) __copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
might_sleep(); might_fault();
if (__builtin_constant_p(n)) { if (__builtin_constant_p(n)) {
unsigned long ret; unsigned long ret;
...@@ -159,7 +159,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) ...@@ -159,7 +159,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline unsigned long __copy_from_user_nocache(void *to, static __always_inline unsigned long __copy_from_user_nocache(void *to,
const void __user *from, unsigned long n) const void __user *from, unsigned long n)
{ {
might_sleep(); might_fault();
if (__builtin_constant_p(n)) { if (__builtin_constant_p(n)) {
unsigned long ret; unsigned long ret;
......
...@@ -29,6 +29,8 @@ static __always_inline __must_check ...@@ -29,6 +29,8 @@ static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size) int __copy_from_user(void *dst, const void __user *src, unsigned size)
{ {
int ret = 0; int ret = 0;
might_fault();
if (!__builtin_constant_p(size)) if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size); return copy_user_generic(dst, (__force void *)src, size);
switch (size) { switch (size) {
...@@ -71,6 +73,8 @@ static __always_inline __must_check ...@@ -71,6 +73,8 @@ static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size) int __copy_to_user(void __user *dst, const void *src, unsigned size)
{ {
int ret = 0; int ret = 0;
might_fault();
if (!__builtin_constant_p(size)) if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size); return copy_user_generic((__force void *)dst, src, size);
switch (size) { switch (size) {
...@@ -113,6 +117,8 @@ static __always_inline __must_check ...@@ -113,6 +117,8 @@ static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size) int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{ {
int ret = 0; int ret = 0;
might_fault();
if (!__builtin_constant_p(size)) if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, return copy_user_generic((__force void *)dst,
(__force void *)src, size); (__force void *)src, size);
......
...@@ -109,6 +109,8 @@ obj-$(CONFIG_MICROCODE) += microcode.o ...@@ -109,6 +109,8 @@ obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64
### ###
# 64 bit specific files # 64 bit specific files
ifeq ($(CONFIG_X86_64),y) ifeq ($(CONFIG_X86_64),y)
...@@ -122,7 +124,6 @@ ifeq ($(CONFIG_X86_64),y) ...@@ -122,7 +124,6 @@ ifeq ($(CONFIG_X86_64),y)
obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o
obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
endif endif
...@@ -101,11 +101,15 @@ static void __init dma32_free_bootmem(void) ...@@ -101,11 +101,15 @@ static void __init dma32_free_bootmem(void)
dma32_bootmem_ptr = NULL; dma32_bootmem_ptr = NULL;
dma32_bootmem_size = 0; dma32_bootmem_size = 0;
} }
#endif
void __init pci_iommu_alloc(void) void __init pci_iommu_alloc(void)
{ {
#ifdef CONFIG_X86_64
/* free the range so iommu could get some range less than 4G */ /* free the range so iommu could get some range less than 4G */
dma32_free_bootmem(); dma32_free_bootmem();
#endif
/* /*
* The order of these functions is important for * The order of these functions is important for
* fall-back/fail-over reasons * fall-back/fail-over reasons
...@@ -121,15 +125,6 @@ void __init pci_iommu_alloc(void) ...@@ -121,15 +125,6 @@ void __init pci_iommu_alloc(void)
pci_swiotlb_init(); pci_swiotlb_init();
} }
unsigned long iommu_nr_pages(unsigned long addr, unsigned long len)
{
unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
return size >> PAGE_SHIFT;
}
EXPORT_SYMBOL(iommu_nr_pages);
#endif
void *dma_generic_alloc_coherent(struct device *dev, size_t size, void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag) dma_addr_t *dma_addr, gfp_t flag)
{ {
......
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <asm/iommu.h> #include <asm/iommu.h>
...@@ -11,6 +13,31 @@ ...@@ -11,6 +13,31 @@
int swiotlb __read_mostly; int swiotlb __read_mostly;
void *swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
return alloc_bootmem_low_pages(size);
}
void *swiotlb_alloc(unsigned order, unsigned long nslabs)
{
return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}
dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr)
{
return paddr;
}
phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
{
return baddr;
}
int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
{
return 0;
}
static dma_addr_t static dma_addr_t
swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size, swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
int direction) int direction)
...@@ -50,8 +77,10 @@ struct dma_mapping_ops swiotlb_dma_ops = { ...@@ -50,8 +77,10 @@ struct dma_mapping_ops swiotlb_dma_ops = {
void __init pci_swiotlb_init(void) void __init pci_swiotlb_init(void)
{ {
/* don't initialize swiotlb if iommu=off (no_iommu=1) */ /* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
swiotlb = 1; swiotlb = 1;
#endif
if (swiotlb_force) if (swiotlb_force)
swiotlb = 1; swiotlb = 1;
if (swiotlb) { if (swiotlb) {
......
...@@ -39,7 +39,7 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon ...@@ -39,7 +39,7 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
#define __do_strncpy_from_user(dst, src, count, res) \ #define __do_strncpy_from_user(dst, src, count, res) \
do { \ do { \
int __d0, __d1, __d2; \ int __d0, __d1, __d2; \
might_sleep(); \ might_fault(); \
__asm__ __volatile__( \ __asm__ __volatile__( \
" testl %1,%1\n" \ " testl %1,%1\n" \
" jz 2f\n" \ " jz 2f\n" \
...@@ -126,7 +126,7 @@ EXPORT_SYMBOL(strncpy_from_user); ...@@ -126,7 +126,7 @@ EXPORT_SYMBOL(strncpy_from_user);
#define __do_clear_user(addr,size) \ #define __do_clear_user(addr,size) \
do { \ do { \
int __d0; \ int __d0; \
might_sleep(); \ might_fault(); \
__asm__ __volatile__( \ __asm__ __volatile__( \
"0: rep; stosl\n" \ "0: rep; stosl\n" \
" movl %2,%0\n" \ " movl %2,%0\n" \
...@@ -155,7 +155,7 @@ do { \ ...@@ -155,7 +155,7 @@ do { \
unsigned long unsigned long
clear_user(void __user *to, unsigned long n) clear_user(void __user *to, unsigned long n)
{ {
might_sleep(); might_fault();
if (access_ok(VERIFY_WRITE, to, n)) if (access_ok(VERIFY_WRITE, to, n))
__do_clear_user(to, n); __do_clear_user(to, n);
return n; return n;
...@@ -197,7 +197,7 @@ long strnlen_user(const char __user *s, long n) ...@@ -197,7 +197,7 @@ long strnlen_user(const char __user *s, long n)
unsigned long mask = -__addr_ok(s); unsigned long mask = -__addr_ok(s);
unsigned long res, tmp; unsigned long res, tmp;
might_sleep(); might_fault();
__asm__ __volatile__( __asm__ __volatile__(
" testl %0, %0\n" " testl %0, %0\n"
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#define __do_strncpy_from_user(dst,src,count,res) \ #define __do_strncpy_from_user(dst,src,count,res) \
do { \ do { \
long __d0, __d1, __d2; \ long __d0, __d1, __d2; \
might_sleep(); \ might_fault(); \
__asm__ __volatile__( \ __asm__ __volatile__( \
" testq %1,%1\n" \ " testq %1,%1\n" \
" jz 2f\n" \ " jz 2f\n" \
...@@ -64,7 +64,7 @@ EXPORT_SYMBOL(strncpy_from_user); ...@@ -64,7 +64,7 @@ EXPORT_SYMBOL(strncpy_from_user);
unsigned long __clear_user(void __user *addr, unsigned long size) unsigned long __clear_user(void __user *addr, unsigned long size)
{ {
long __d0; long __d0;
might_sleep(); might_fault();
/* no memory constraint because it doesn't change any memory gcc knows /* no memory constraint because it doesn't change any memory gcc knows
about */ about */
asm volatile( asm volatile(
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h> #include <linux/pfn.h>
#include <linux/poison.h> #include <linux/poison.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
...@@ -967,6 +968,8 @@ void __init mem_init(void) ...@@ -967,6 +968,8 @@ void __init mem_init(void)
int codesize, reservedpages, datasize, initsize; int codesize, reservedpages, datasize, initsize;
int tmp; int tmp;
pci_iommu_alloc();
#ifdef CONFIG_FLATMEM #ifdef CONFIG_FLATMEM
BUG_ON(!mem_map); BUG_ON(!mem_map);
#endif #endif
......
...@@ -41,15 +41,14 @@ struct bug_entry { ...@@ -41,15 +41,14 @@ struct bug_entry {
#ifndef __WARN #ifndef __WARN
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
extern void warn_on_slowpath(const char *file, const int line);
extern void warn_slowpath(const char *file, const int line, extern void warn_slowpath(const char *file, const int line,
const char *fmt, ...) __attribute__((format(printf, 3, 4))); const char *fmt, ...) __attribute__((format(printf, 3, 4)));
#define WANT_WARN_ON_SLOWPATH #define WANT_WARN_ON_SLOWPATH
#endif #endif
#define __WARN() warn_on_slowpath(__FILE__, __LINE__) #define __WARN() warn_slowpath(__FILE__, __LINE__, NULL)
#define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg) #define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
#else #else
#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0) #define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
#endif #endif
#ifndef WARN_ON #ifndef WARN_ON
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
#define _LINUX_BH_H #define _LINUX_BH_H
extern void local_bh_disable(void); extern void local_bh_disable(void);
extern void __local_bh_enable(void);
extern void _local_bh_enable(void); extern void _local_bh_enable(void);
extern void local_bh_enable(void); extern void local_bh_enable(void);
extern void local_bh_enable_ip(unsigned long ip); extern void local_bh_enable_ip(unsigned long ip);
......
...@@ -17,7 +17,7 @@ extern int debug_locks_off(void); ...@@ -17,7 +17,7 @@ extern int debug_locks_off(void);
({ \ ({ \
int __ret = 0; \ int __ret = 0; \
\ \
if (unlikely(c)) { \ if (!oops_in_progress && unlikely(c)) { \
if (debug_locks_off() && !debug_locks_silent) \ if (debug_locks_off() && !debug_locks_silent) \
WARN_ON(1); \ WARN_ON(1); \
__ret = 1; \ __ret = 1; \
......
...@@ -25,7 +25,8 @@ union ktime; ...@@ -25,7 +25,8 @@ union ktime;
#define FUTEX_WAKE_BITSET 10 #define FUTEX_WAKE_BITSET 10
#define FUTEX_PRIVATE_FLAG 128 #define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CMD_MASK ~FUTEX_PRIVATE_FLAG #define FUTEX_CLOCK_REALTIME 256
#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) #define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG) #define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
...@@ -164,6 +165,8 @@ union futex_key { ...@@ -164,6 +165,8 @@ union futex_key {
} both; } both;
}; };
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
#ifdef CONFIG_FUTEX #ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr); extern void exit_robust_list(struct task_struct *curr);
extern void exit_pi_state_list(struct task_struct *curr); extern void exit_pi_state_list(struct task_struct *curr);
......
...@@ -119,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk) ...@@ -119,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk)
} }
#endif #endif
#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ) #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
extern void rcu_irq_enter(void); extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void); extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#else #else
# define rcu_irq_enter() do { } while (0) # define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0) # define rcu_irq_exit() do { } while (0)
#endif /* CONFIG_PREEMPT_RCU */ # define rcu_nmi_enter() do { } while (0)
# define rcu_nmi_exit() do { } while (0)
#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
/* /*
* It is safe to do non-atomic ops on ->hardirq_context, * It is safe to do non-atomic ops on ->hardirq_context,
...@@ -135,7 +139,6 @@ extern void rcu_irq_exit(void); ...@@ -135,7 +139,6 @@ extern void rcu_irq_exit(void);
*/ */
#define __irq_enter() \ #define __irq_enter() \
do { \ do { \
rcu_irq_enter(); \
account_system_vtime(current); \ account_system_vtime(current); \
add_preempt_count(HARDIRQ_OFFSET); \ add_preempt_count(HARDIRQ_OFFSET); \
trace_hardirq_enter(); \ trace_hardirq_enter(); \
...@@ -154,7 +157,6 @@ extern void irq_enter(void); ...@@ -154,7 +157,6 @@ extern void irq_enter(void);
trace_hardirq_exit(); \ trace_hardirq_exit(); \
account_system_vtime(current); \ account_system_vtime(current); \
sub_preempt_count(HARDIRQ_OFFSET); \ sub_preempt_count(HARDIRQ_OFFSET); \
rcu_irq_exit(); \
} while (0) } while (0)
/* /*
...@@ -166,11 +168,14 @@ extern void irq_exit(void); ...@@ -166,11 +168,14 @@ extern void irq_exit(void);
do { \ do { \
ftrace_nmi_enter(); \ ftrace_nmi_enter(); \
lockdep_off(); \ lockdep_off(); \
rcu_nmi_enter(); \
__irq_enter(); \ __irq_enter(); \
} while (0) } while (0)
#define nmi_exit() \ #define nmi_exit() \
do { \ do { \
__irq_exit(); \ __irq_exit(); \
rcu_nmi_exit(); \
lockdep_on(); \ lockdep_on(); \
ftrace_nmi_exit(); \ ftrace_nmi_exit(); \
} while (0) } while (0)
......
...@@ -141,6 +141,15 @@ extern int _cond_resched(void); ...@@ -141,6 +141,15 @@ extern int _cond_resched(void);
(__x < 0) ? -__x : __x; \ (__x < 0) ? -__x : __x; \
}) })
#ifdef CONFIG_PROVE_LOCKING
void might_fault(void);
#else
static inline void might_fault(void)
{
might_sleep();
}
#endif
extern struct atomic_notifier_head panic_notifier_list; extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(long time); extern long (*panic_blink)(long time);
NORET_TYPE void panic(const char * fmt, ...) NORET_TYPE void panic(const char * fmt, ...)
...@@ -188,6 +197,8 @@ extern unsigned long long memparse(const char *ptr, char **retptr); ...@@ -188,6 +197,8 @@ extern unsigned long long memparse(const char *ptr, char **retptr);
extern int core_kernel_text(unsigned long addr); extern int core_kernel_text(unsigned long addr);
extern int __kernel_text_address(unsigned long addr); extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
struct pid; struct pid;
extern struct pid *session_of_pgrp(struct pid *pgrp); extern struct pid *session_of_pgrp(struct pid *pgrp);
......
...@@ -73,6 +73,8 @@ struct lock_class_key { ...@@ -73,6 +73,8 @@ struct lock_class_key {
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
}; };
#define LOCKSTAT_POINTS 4
/* /*
* The lock-class itself: * The lock-class itself:
*/ */
...@@ -119,7 +121,8 @@ struct lock_class { ...@@ -119,7 +121,8 @@ struct lock_class {
int name_version; int name_version;
#ifdef CONFIG_LOCK_STAT #ifdef CONFIG_LOCK_STAT
unsigned long contention_point[4]; unsigned long contention_point[LOCKSTAT_POINTS];
unsigned long contending_point[LOCKSTAT_POINTS];
#endif #endif
}; };
...@@ -144,6 +147,7 @@ enum bounce_type { ...@@ -144,6 +147,7 @@ enum bounce_type {
struct lock_class_stats { struct lock_class_stats {
unsigned long contention_point[4]; unsigned long contention_point[4];
unsigned long contending_point[4];
struct lock_time read_waittime; struct lock_time read_waittime;
struct lock_time write_waittime; struct lock_time write_waittime;
struct lock_time read_holdtime; struct lock_time read_holdtime;
...@@ -165,6 +169,7 @@ struct lockdep_map { ...@@ -165,6 +169,7 @@ struct lockdep_map {
const char *name; const char *name;
#ifdef CONFIG_LOCK_STAT #ifdef CONFIG_LOCK_STAT
int cpu; int cpu;
unsigned long ip;
#endif #endif
}; };
...@@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, ...@@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
extern void lock_release(struct lockdep_map *lock, int nested, extern void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip); unsigned long ip);
extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, extern void lock_set_class(struct lockdep_map *lock, const char *name,
unsigned long ip); struct lock_class_key *key, unsigned int subclass,
unsigned long ip);
static inline void lock_set_subclass(struct lockdep_map *lock,
unsigned int subclass, unsigned long ip)
{
lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
# define INIT_LOCKDEP .lockdep_recursion = 0, # define INIT_LOCKDEP .lockdep_recursion = 0,
...@@ -328,6 +340,7 @@ static inline void lockdep_on(void) ...@@ -328,6 +340,7 @@ static inline void lockdep_on(void)
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) # define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0) # define lock_release(l, n, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0) # define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0) # define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0) # define lockdep_info() do { } while (0)
...@@ -356,7 +369,7 @@ struct lock_class_key { }; ...@@ -356,7 +369,7 @@ struct lock_class_key { };
#ifdef CONFIG_LOCK_STAT #ifdef CONFIG_LOCK_STAT
extern void lock_contended(struct lockdep_map *lock, unsigned long ip); extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock); extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
#define LOCK_CONTENDED(_lock, try, lock) \ #define LOCK_CONTENDED(_lock, try, lock) \
do { \ do { \
...@@ -364,13 +377,13 @@ do { \ ...@@ -364,13 +377,13 @@ do { \
lock_contended(&(_lock)->dep_map, _RET_IP_); \ lock_contended(&(_lock)->dep_map, _RET_IP_); \
lock(_lock); \ lock(_lock); \
} \ } \
lock_acquired(&(_lock)->dep_map); \ lock_acquired(&(_lock)->dep_map, _RET_IP_); \
} while (0) } while (0)
#else /* CONFIG_LOCK_STAT */ #else /* CONFIG_LOCK_STAT */
#define lock_contended(lockdep_map, ip) do {} while (0) #define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map) do {} while (0) #define lock_acquired(lockdep_map, ip) do {} while (0)
#define LOCK_CONTENDED(_lock, try, lock) \ #define LOCK_CONTENDED(_lock, try, lock) \
lock(_lock) lock(_lock)
...@@ -481,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr) ...@@ -481,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr)
# define lock_map_release(l) do { } while (0) # define lock_map_release(l) do { } while (0)
#endif #endif
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
#endif /* __LINUX_LOCKDEP_H */ #endif /* __LINUX_LOCKDEP_H */
...@@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); ...@@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
/* /*
* NOTE: mutex_trylock() follows the spin_trylock() convention, * NOTE: mutex_trylock() follows the spin_trylock() convention,
* not the down_trylock() convention! * not the down_trylock() convention!
*
* Returns 1 if the mutex has been acquired successfully, and 0 on contention.
*/ */
extern int mutex_trylock(struct mutex *lock); extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock); extern void mutex_unlock(struct mutex *lock);
......
...@@ -41,7 +41,7 @@ ...@@ -41,7 +41,7 @@
#include <linux/seqlock.h> #include <linux/seqlock.h>
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */ #define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */ #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
......
...@@ -52,11 +52,15 @@ struct rcu_head { ...@@ -52,11 +52,15 @@ struct rcu_head {
void (*func)(struct rcu_head *head); void (*func)(struct rcu_head *head);
}; };
#ifdef CONFIG_CLASSIC_RCU #if defined(CONFIG_CLASSIC_RCU)
#include <linux/rcuclassic.h> #include <linux/rcuclassic.h>
#else /* #ifdef CONFIG_CLASSIC_RCU */ #elif defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_PREEMPT_RCU)
#include <linux/rcupreempt.h> #include <linux/rcupreempt.h>
#endif /* #else #ifdef CONFIG_CLASSIC_RCU */ #else
#error "Unknown RCU implementation specified to kernel configuration"
#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
#define RCU_HEAD_INIT { .next = NULL, .func = NULL } #define RCU_HEAD_INIT { .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
......
This diff is collapsed.
...@@ -7,9 +7,31 @@ struct device; ...@@ -7,9 +7,31 @@ struct device;
struct dma_attrs; struct dma_attrs;
struct scatterlist; struct scatterlist;
/*
* Maximum allowable number of contiguous slabs to map,
* must be a power of 2. What is the appropriate value ?
* The complexity of {map,unmap}_single is linearly dependent on this value.
*/
#define IO_TLB_SEGSIZE 128
/*
* log of the size of each IO TLB slab. The number of slabs is command line
* controllable.
*/
#define IO_TLB_SHIFT 11
extern void extern void
swiotlb_init(void); swiotlb_init(void);
extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size);
extern void extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size, *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags); dma_addr_t *dma_handle, gfp_t flags);
......
...@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to, ...@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
\ \
set_fs(KERNEL_DS); \ set_fs(KERNEL_DS); \
pagefault_disable(); \ pagefault_disable(); \
ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
pagefault_enable(); \ pagefault_enable(); \
set_fs(old_fs); \ set_fs(old_fs); \
ret; \ ret; \
......
...@@ -936,10 +936,90 @@ source "block/Kconfig" ...@@ -936,10 +936,90 @@ source "block/Kconfig"
config PREEMPT_NOTIFIERS config PREEMPT_NOTIFIERS
bool bool
choice
prompt "RCU Implementation"
default CLASSIC_RCU
config CLASSIC_RCU config CLASSIC_RCU
def_bool !PREEMPT_RCU bool "Classic RCU"
help help
This option selects the classic RCU implementation that is This option selects the classic RCU implementation that is
designed for best read-side performance on non-realtime designed for best read-side performance on non-realtime
systems. Classic RCU is the default. Note that the systems.
PREEMPT_RCU symbol is used to select/deselect this option.
Select this option if you are unsure.
config TREE_RCU
bool "Tree-based hierarchical RCU"
help
This option selects the RCU implementation that is
designed for very large SMP system with hundreds or
thousands of CPUs.
config PREEMPT_RCU
bool "Preemptible RCU"
depends on PREEMPT
help
This option reduces the latency of the kernel by making certain
RCU sections preemptible. Normally RCU code is non-preemptible, if
this option is selected then read-only RCU sections become
preemptible. This helps latency, but may expose bugs due to
now-naive assumptions about each RCU read-side critical section
remaining on a given CPU through its execution.
endchoice
config RCU_TRACE
bool "Enable tracing for RCU"
depends on TREE_RCU || PREEMPT_RCU
help
This option provides tracing in RCU which presents stats
in debugfs for debugging RCU implementation.
Say Y here if you want to enable RCU tracing
Say N if you are unsure.
config RCU_FANOUT
int "Tree-based hierarchical RCU fanout value"
range 2 64 if 64BIT
range 2 32 if !64BIT
depends on TREE_RCU
default 64 if 64BIT
default 32 if !64BIT
help
This option controls the fanout of hierarchical implementations
of RCU, allowing RCU to work efficiently on machines with
large numbers of CPUs. This value must be at least the cube
root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit
systems and up to 262,144 for 64-bit systems.
Select a specific number if testing RCU itself.
Take the default if unsure.
config RCU_FANOUT_EXACT
bool "Disable tree-based hierarchical RCU auto-balancing"
depends on TREE_RCU
default n
help
This option forces use of the exact RCU_FANOUT value specified,
regardless of imbalances in the hierarchy. This is useful for
testing RCU itself, and might one day be useful on systems with
strong NUMA behavior.
Without RCU_FANOUT_EXACT, the code will balance the hierarchy.
Say N if unsure.
config TREE_RCU_TRACE
def_bool RCU_TRACE && TREE_RCU
select DEBUG_FS
help
This option provides tracing for the TREE_RCU implementation,
permitting Makefile to trivially select kernel/rcutree_trace.c.
config PREEMPT_RCU_TRACE
def_bool RCU_TRACE && PREEMPT_RCU
select DEBUG_FS
help
This option provides tracing for the PREEMPT_RCU implementation,
permitting Makefile to trivially select kernel/rcupreempt_trace.c.
...@@ -52,28 +52,3 @@ config PREEMPT ...@@ -52,28 +52,3 @@ config PREEMPT
endchoice endchoice
config PREEMPT_RCU
bool "Preemptible RCU"
depends on PREEMPT
default n
help
This option reduces the latency of the kernel by making certain
RCU sections preemptible. Normally RCU code is non-preemptible, if
this option is selected then read-only RCU sections become
preemptible. This helps latency, but may expose bugs due to
now-naive assumptions about each RCU read-side critical section
remaining on a given CPU through its execution.
Say N if you are unsure.
config RCU_TRACE
bool "Enable tracing for RCU - currently stats in debugfs"
depends on PREEMPT_RCU
select DEBUG_FS
default y
help
This option provides tracing in RCU which presents stats
in debugfs for debugging RCU implementation.
Say Y here if you want to enable RCU tracing
Say N if you are unsure.
...@@ -73,10 +73,10 @@ obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ ...@@ -73,10 +73,10 @@ obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
obj-$(CONFIG_TREE_RCU) += rcutree.o
obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
ifeq ($(CONFIG_PREEMPT_RCU),y) obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
obj-$(CONFIG_RCU_TRACE) += rcupreempt_trace.o obj-$(CONFIG_PREEMPT_RCU_TRACE) += rcupreempt_trace.o
endif
obj-$(CONFIG_RELAY) += relay.o obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
......
...@@ -1328,10 +1328,10 @@ static int wait_task_zombie(struct task_struct *p, int options, ...@@ -1328,10 +1328,10 @@ static int wait_task_zombie(struct task_struct *p, int options,
* group, which consolidates times for all threads in the * group, which consolidates times for all threads in the
* group including the group leader. * group including the group leader.
*/ */
thread_group_cputime(p, &cputime);
spin_lock_irq(&p->parent->sighand->siglock); spin_lock_irq(&p->parent->sighand->siglock);
psig = p->parent->signal; psig = p->parent->signal;
sig = p->signal; sig = p->signal;
thread_group_cputime(p, &cputime);
psig->cutime = psig->cutime =
cputime_add(psig->cutime, cputime_add(psig->cutime,
cputime_add(cputime.utime, cputime_add(cputime.utime,
......
...@@ -67,3 +67,19 @@ int kernel_text_address(unsigned long addr) ...@@ -67,3 +67,19 @@ int kernel_text_address(unsigned long addr)
return 1; return 1;
return module_text_address(addr) != NULL; return module_text_address(addr) != NULL;
} }
/*
* On some architectures (PPC64, IA64) function pointers
* are actually only tokens to some data that then holds the
* real function address. As a result, to find if a function
* pointer is part of the kernel text, we need to do some
* special dereferencing first.
*/
int func_ptr_is_kernel_text(void *ptr)
{
unsigned long addr;
addr = (unsigned long) dereference_function_descriptor(ptr);
if (core_kernel_text(addr))
return 1;
return module_text_address(addr) != NULL;
}
This diff is collapsed.
...@@ -673,6 +673,18 @@ int request_irq(unsigned int irq, irq_handler_t handler, ...@@ -673,6 +673,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
struct irq_desc *desc; struct irq_desc *desc;
int retval; int retval;
/*
* handle_IRQ_event() always ignores IRQF_DISABLED except for
* the _first_ irqaction (sigh). That can cause oopsing, but
* the behavior is classified as "will not fix" so we need to
* start nudging drivers away from using that idiom.
*/
if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
== (IRQF_SHARED|IRQF_DISABLED))
pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
"guaranteed on shared IRQs\n",
irq, devname);
#ifdef CONFIG_LOCKDEP #ifdef CONFIG_LOCKDEP
/* /*
* Lockdep wants atomic interrupt handlers: * Lockdep wants atomic interrupt handlers:
......
...@@ -137,16 +137,16 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock) ...@@ -137,16 +137,16 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
#ifdef CONFIG_LOCK_STAT #ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
static int lock_contention_point(struct lock_class *class, unsigned long ip) static int lock_point(unsigned long points[], unsigned long ip)
{ {
int i; int i;
for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) { for (i = 0; i < LOCKSTAT_POINTS; i++) {
if (class->contention_point[i] == 0) { if (points[i] == 0) {
class->contention_point[i] = ip; points[i] = ip;
break; break;
} }
if (class->contention_point[i] == ip) if (points[i] == ip)
break; break;
} }
...@@ -186,6 +186,9 @@ struct lock_class_stats lock_stats(struct lock_class *class) ...@@ -186,6 +186,9 @@ struct lock_class_stats lock_stats(struct lock_class *class)
for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++) for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
stats.contention_point[i] += pcs->contention_point[i]; stats.contention_point[i] += pcs->contention_point[i];
for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
stats.contending_point[i] += pcs->contending_point[i];
lock_time_add(&pcs->read_waittime, &stats.read_waittime); lock_time_add(&pcs->read_waittime, &stats.read_waittime);
lock_time_add(&pcs->write_waittime, &stats.write_waittime); lock_time_add(&pcs->write_waittime, &stats.write_waittime);
...@@ -210,6 +213,7 @@ void clear_lock_stats(struct lock_class *class) ...@@ -210,6 +213,7 @@ void clear_lock_stats(struct lock_class *class)
memset(cpu_stats, 0, sizeof(struct lock_class_stats)); memset(cpu_stats, 0, sizeof(struct lock_class_stats));
} }
memset(class->contention_point, 0, sizeof(class->contention_point)); memset(class->contention_point, 0, sizeof(class->contention_point));
memset(class->contending_point, 0, sizeof(class->contending_point));
} }
static struct lock_class_stats *get_lock_stats(struct lock_class *class) static struct lock_class_stats *get_lock_stats(struct lock_class *class)
...@@ -288,14 +292,12 @@ void lockdep_off(void) ...@@ -288,14 +292,12 @@ void lockdep_off(void)
{ {
current->lockdep_recursion++; current->lockdep_recursion++;
} }
EXPORT_SYMBOL(lockdep_off); EXPORT_SYMBOL(lockdep_off);
void lockdep_on(void) void lockdep_on(void)
{ {
current->lockdep_recursion--; current->lockdep_recursion--;
} }
EXPORT_SYMBOL(lockdep_on); EXPORT_SYMBOL(lockdep_on);
/* /*
...@@ -577,7 +579,8 @@ static void print_lock_class_header(struct lock_class *class, int depth) ...@@ -577,7 +579,8 @@ static void print_lock_class_header(struct lock_class *class, int depth)
/* /*
* printk all lock dependencies starting at <entry>: * printk all lock dependencies starting at <entry>:
*/ */
static void print_lock_dependencies(struct lock_class *class, int depth) static void __used
print_lock_dependencies(struct lock_class *class, int depth)
{ {
struct lock_list *entry; struct lock_list *entry;
...@@ -2509,7 +2512,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, ...@@ -2509,7 +2512,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
if (subclass) if (subclass)
register_lock_class(lock, subclass, 1); register_lock_class(lock, subclass, 1);
} }
EXPORT_SYMBOL_GPL(lockdep_init_map); EXPORT_SYMBOL_GPL(lockdep_init_map);
/* /*
...@@ -2690,8 +2692,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, ...@@ -2690,8 +2692,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
} }
static int static int
__lock_set_subclass(struct lockdep_map *lock, __lock_set_class(struct lockdep_map *lock, const char *name,
unsigned int subclass, unsigned long ip) struct lock_class_key *key, unsigned int subclass,
unsigned long ip)
{ {
struct task_struct *curr = current; struct task_struct *curr = current;
struct held_lock *hlock, *prev_hlock; struct held_lock *hlock, *prev_hlock;
...@@ -2718,6 +2721,7 @@ __lock_set_subclass(struct lockdep_map *lock, ...@@ -2718,6 +2721,7 @@ __lock_set_subclass(struct lockdep_map *lock,
return print_unlock_inbalance_bug(curr, lock, ip); return print_unlock_inbalance_bug(curr, lock, ip);
found_it: found_it:
lockdep_init_map(lock, name, key, 0);
class = register_lock_class(lock, subclass, 0); class = register_lock_class(lock, subclass, 0);
hlock->class_idx = class - lock_classes + 1; hlock->class_idx = class - lock_classes + 1;
...@@ -2902,9 +2906,9 @@ static void check_flags(unsigned long flags) ...@@ -2902,9 +2906,9 @@ static void check_flags(unsigned long flags)
#endif #endif
} }
void void lock_set_class(struct lockdep_map *lock, const char *name,
lock_set_subclass(struct lockdep_map *lock, struct lock_class_key *key, unsigned int subclass,
unsigned int subclass, unsigned long ip) unsigned long ip)
{ {
unsigned long flags; unsigned long flags;
...@@ -2914,13 +2918,12 @@ lock_set_subclass(struct lockdep_map *lock, ...@@ -2914,13 +2918,12 @@ lock_set_subclass(struct lockdep_map *lock,
raw_local_irq_save(flags); raw_local_irq_save(flags);
current->lockdep_recursion = 1; current->lockdep_recursion = 1;
check_flags(flags); check_flags(flags);
if (__lock_set_subclass(lock, subclass, ip)) if (__lock_set_class(lock, name, key, subclass, ip))
check_chain_key(current); check_chain_key(current);
current->lockdep_recursion = 0; current->lockdep_recursion = 0;
raw_local_irq_restore(flags); raw_local_irq_restore(flags);
} }
EXPORT_SYMBOL_GPL(lock_set_class);
EXPORT_SYMBOL_GPL(lock_set_subclass);
/* /*
* We are not always called with irqs disabled - do that here, * We are not always called with irqs disabled - do that here,
...@@ -2944,7 +2947,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, ...@@ -2944,7 +2947,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
current->lockdep_recursion = 0; current->lockdep_recursion = 0;
raw_local_irq_restore(flags); raw_local_irq_restore(flags);
} }
EXPORT_SYMBOL_GPL(lock_acquire); EXPORT_SYMBOL_GPL(lock_acquire);
void lock_release(struct lockdep_map *lock, int nested, void lock_release(struct lockdep_map *lock, int nested,
...@@ -2962,7 +2964,6 @@ void lock_release(struct lockdep_map *lock, int nested, ...@@ -2962,7 +2964,6 @@ void lock_release(struct lockdep_map *lock, int nested,
current->lockdep_recursion = 0; current->lockdep_recursion = 0;
raw_local_irq_restore(flags); raw_local_irq_restore(flags);
} }
EXPORT_SYMBOL_GPL(lock_release); EXPORT_SYMBOL_GPL(lock_release);
#ifdef CONFIG_LOCK_STAT #ifdef CONFIG_LOCK_STAT
...@@ -3000,7 +3001,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip) ...@@ -3000,7 +3001,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
struct held_lock *hlock, *prev_hlock; struct held_lock *hlock, *prev_hlock;
struct lock_class_stats *stats; struct lock_class_stats *stats;
unsigned int depth; unsigned int depth;
int i, point; int i, contention_point, contending_point;
depth = curr->lockdep_depth; depth = curr->lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(!depth)) if (DEBUG_LOCKS_WARN_ON(!depth))
...@@ -3024,18 +3025,22 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip) ...@@ -3024,18 +3025,22 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
found_it: found_it:
hlock->waittime_stamp = sched_clock(); hlock->waittime_stamp = sched_clock();
point = lock_contention_point(hlock_class(hlock), ip); contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
contending_point = lock_point(hlock_class(hlock)->contending_point,
lock->ip);
stats = get_lock_stats(hlock_class(hlock)); stats = get_lock_stats(hlock_class(hlock));
if (point < ARRAY_SIZE(stats->contention_point)) if (contention_point < LOCKSTAT_POINTS)
stats->contention_point[point]++; stats->contention_point[contention_point]++;
if (contending_point < LOCKSTAT_POINTS)
stats->contending_point[contending_point]++;
if (lock->cpu != smp_processor_id()) if (lock->cpu != smp_processor_id())
stats->bounces[bounce_contended + !!hlock->read]++; stats->bounces[bounce_contended + !!hlock->read]++;
put_lock_stats(stats); put_lock_stats(stats);
} }
static void static void
__lock_acquired(struct lockdep_map *lock) __lock_acquired(struct lockdep_map *lock, unsigned long ip)
{ {
struct task_struct *curr = current; struct task_struct *curr = current;
struct held_lock *hlock, *prev_hlock; struct held_lock *hlock, *prev_hlock;
...@@ -3084,6 +3089,7 @@ __lock_acquired(struct lockdep_map *lock) ...@@ -3084,6 +3089,7 @@ __lock_acquired(struct lockdep_map *lock)
put_lock_stats(stats); put_lock_stats(stats);
lock->cpu = cpu; lock->cpu = cpu;
lock->ip = ip;
} }
void lock_contended(struct lockdep_map *lock, unsigned long ip) void lock_contended(struct lockdep_map *lock, unsigned long ip)
...@@ -3105,7 +3111,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) ...@@ -3105,7 +3111,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
} }
EXPORT_SYMBOL_GPL(lock_contended); EXPORT_SYMBOL_GPL(lock_contended);
void lock_acquired(struct lockdep_map *lock) void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{ {
unsigned long flags; unsigned long flags;
...@@ -3118,7 +3124,7 @@ void lock_acquired(struct lockdep_map *lock) ...@@ -3118,7 +3124,7 @@ void lock_acquired(struct lockdep_map *lock)
raw_local_irq_save(flags); raw_local_irq_save(flags);
check_flags(flags); check_flags(flags);
current->lockdep_recursion = 1; current->lockdep_recursion = 1;
__lock_acquired(lock); __lock_acquired(lock, ip);
current->lockdep_recursion = 0; current->lockdep_recursion = 0;
raw_local_irq_restore(flags); raw_local_irq_restore(flags);
} }
...@@ -3442,7 +3448,6 @@ void debug_show_all_locks(void) ...@@ -3442,7 +3448,6 @@ void debug_show_all_locks(void)
if (unlock) if (unlock)
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
} }
EXPORT_SYMBOL_GPL(debug_show_all_locks); EXPORT_SYMBOL_GPL(debug_show_all_locks);
/* /*
...@@ -3463,7 +3468,6 @@ void debug_show_held_locks(struct task_struct *task) ...@@ -3463,7 +3468,6 @@ void debug_show_held_locks(struct task_struct *task)
{ {
__debug_show_held_locks(task); __debug_show_held_locks(task);
} }
EXPORT_SYMBOL_GPL(debug_show_held_locks); EXPORT_SYMBOL_GPL(debug_show_held_locks);
void lockdep_sys_exit(void) void lockdep_sys_exit(void)
......
...@@ -470,11 +470,12 @@ static void seq_line(struct seq_file *m, char c, int offset, int length) ...@@ -470,11 +470,12 @@ static void seq_line(struct seq_file *m, char c, int offset, int length)
static void snprint_time(char *buf, size_t bufsiz, s64 nr) static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{ {
unsigned long rem; s64 div;
s32 rem;
nr += 5; /* for display rounding */ nr += 5; /* for display rounding */
rem = do_div(nr, 1000); /* XXX: do_div_signed */ div = div_s64_rem(nr, 1000, &rem);
snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, (int)rem/10); snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
} }
static void seq_time(struct seq_file *m, s64 time) static void seq_time(struct seq_file *m, s64 time)
...@@ -556,7 +557,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data) ...@@ -556,7 +557,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
if (stats->read_holdtime.nr) if (stats->read_holdtime.nr)
namelen += 2; namelen += 2;
for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) { for (i = 0; i < LOCKSTAT_POINTS; i++) {
char sym[KSYM_SYMBOL_LEN]; char sym[KSYM_SYMBOL_LEN];
char ip[32]; char ip[32];
...@@ -573,6 +574,23 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data) ...@@ -573,6 +574,23 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
stats->contention_point[i], stats->contention_point[i],
ip, sym); ip, sym);
} }
for (i = 0; i < LOCKSTAT_POINTS; i++) {
char sym[KSYM_SYMBOL_LEN];
char ip[32];
if (class->contending_point[i] == 0)
break;
if (!i)
seq_line(m, '-', 40-namelen, namelen);
sprint_symbol(sym, class->contending_point[i]);
snprintf(ip, sizeof(ip), "[<%p>]",
(void *)class->contending_point[i]);
seq_printf(m, "%40s %14lu %29s %s\n", name,
stats->contending_point[i],
ip, sym);
}
if (i) { if (i) {
seq_puts(m, "\n"); seq_puts(m, "\n");
seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1)); seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
...@@ -582,7 +600,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data) ...@@ -582,7 +600,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
static void seq_header(struct seq_file *m) static void seq_header(struct seq_file *m)
{ {
seq_printf(m, "lock_stat version 0.2\n"); seq_printf(m, "lock_stat version 0.3\n");
seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1)); seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s " seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
"%14s %14s\n", "%14s %14s\n",
......
...@@ -59,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init); ...@@ -59,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
* We also put the fastpath first in the kernel image, to make sure the * We also put the fastpath first in the kernel image, to make sure the
* branch is predicted by the CPU as default-untaken. * branch is predicted by the CPU as default-untaken.
*/ */
static void noinline __sched static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count); __mutex_lock_slowpath(atomic_t *lock_count);
/*** /***
...@@ -96,7 +96,7 @@ void inline __sched mutex_lock(struct mutex *lock) ...@@ -96,7 +96,7 @@ void inline __sched mutex_lock(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock); EXPORT_SYMBOL(mutex_lock);
#endif #endif
static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/*** /***
* mutex_unlock - release the mutex * mutex_unlock - release the mutex
...@@ -184,7 +184,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, ...@@ -184,7 +184,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
} }
done: done:
lock_acquired(&lock->dep_map); lock_acquired(&lock->dep_map, ip);
/* got the lock - rejoice! */ /* got the lock - rejoice! */
mutex_remove_waiter(lock, &waiter, task_thread_info(task)); mutex_remove_waiter(lock, &waiter, task_thread_info(task));
debug_mutex_set_owner(lock, task_thread_info(task)); debug_mutex_set_owner(lock, task_thread_info(task));
...@@ -268,7 +268,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested) ...@@ -268,7 +268,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
/* /*
* Release the lock, slowpath: * Release the lock, slowpath:
*/ */
static noinline void static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count) __mutex_unlock_slowpath(atomic_t *lock_count)
{ {
__mutex_unlock_common_slowpath(lock_count, 1); __mutex_unlock_common_slowpath(lock_count, 1);
...@@ -313,7 +313,7 @@ int __sched mutex_lock_killable(struct mutex *lock) ...@@ -313,7 +313,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
} }
EXPORT_SYMBOL(mutex_lock_killable); EXPORT_SYMBOL(mutex_lock_killable);
static noinline void __sched static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count) __mutex_lock_slowpath(atomic_t *lock_count)
{ {
struct mutex *lock = container_of(lock_count, struct mutex, count); struct mutex *lock = container_of(lock_count, struct mutex, count);
......
...@@ -82,6 +82,14 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl, ...@@ -82,6 +82,14 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
while (nb && nr_to_call) { while (nb && nr_to_call) {
next_nb = rcu_dereference(nb->next); next_nb = rcu_dereference(nb->next);
#ifdef CONFIG_DEBUG_NOTIFIERS
if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
WARN(1, "Invalid notifier called!");
nb = next_nb;
continue;
}
#endif
ret = nb->notifier_call(nb, val, v); ret = nb->notifier_call(nb, val, v);
if (nr_calls) if (nr_calls)
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/debug_locks.h> #include <linux/debug_locks.h>
#include <linux/random.h> #include <linux/random.h>
#include <linux/kallsyms.h> #include <linux/kallsyms.h>
#include <linux/dmi.h>
int panic_on_oops; int panic_on_oops;
static unsigned long tainted_mask; static unsigned long tainted_mask;
...@@ -321,36 +322,27 @@ void oops_exit(void) ...@@ -321,36 +322,27 @@ void oops_exit(void)
} }
#ifdef WANT_WARN_ON_SLOWPATH #ifdef WANT_WARN_ON_SLOWPATH
void warn_on_slowpath(const char *file, int line)
{
char function[KSYM_SYMBOL_LEN];
unsigned long caller = (unsigned long) __builtin_return_address(0);
sprint_symbol(function, caller);
printk(KERN_WARNING "------------[ cut here ]------------\n");
printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
line, function);
print_modules();
dump_stack();
print_oops_end_marker();
add_taint(TAINT_WARN);
}
EXPORT_SYMBOL(warn_on_slowpath);
void warn_slowpath(const char *file, int line, const char *fmt, ...) void warn_slowpath(const char *file, int line, const char *fmt, ...)
{ {
va_list args; va_list args;
char function[KSYM_SYMBOL_LEN]; char function[KSYM_SYMBOL_LEN];
unsigned long caller = (unsigned long)__builtin_return_address(0); unsigned long caller = (unsigned long)__builtin_return_address(0);
const char *board;
sprint_symbol(function, caller); sprint_symbol(function, caller);
printk(KERN_WARNING "------------[ cut here ]------------\n"); printk(KERN_WARNING "------------[ cut here ]------------\n");
printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
line, function); line, function);
va_start(args, fmt); board = dmi_get_system_info(DMI_PRODUCT_NAME);
vprintk(fmt, args); if (board)
va_end(args); printk(KERN_WARNING "Hardware name: %s\n", board);
if (fmt) {
va_start(args, fmt);
vprintk(fmt, args);
va_end(args);
}
print_modules(); print_modules();
dump_stack(); dump_stack();
......
...@@ -58,21 +58,21 @@ void thread_group_cputime( ...@@ -58,21 +58,21 @@ void thread_group_cputime(
struct task_struct *tsk, struct task_struct *tsk,
struct task_cputime *times) struct task_cputime *times)
{ {
struct signal_struct *sig; struct task_cputime *totals, *tot;
int i; int i;
struct task_cputime *tot;
sig = tsk->signal; totals = tsk->signal->cputime.totals;
if (unlikely(!sig) || !sig->cputime.totals) { if (!totals) {
times->utime = tsk->utime; times->utime = tsk->utime;
times->stime = tsk->stime; times->stime = tsk->stime;
times->sum_exec_runtime = tsk->se.sum_exec_runtime; times->sum_exec_runtime = tsk->se.sum_exec_runtime;
return; return;
} }
times->stime = times->utime = cputime_zero; times->stime = times->utime = cputime_zero;
times->sum_exec_runtime = 0; times->sum_exec_runtime = 0;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
tot = per_cpu_ptr(tsk->signal->cputime.totals, i); tot = per_cpu_ptr(totals, i);
times->utime = cputime_add(times->utime, tot->utime); times->utime = cputime_add(times->utime, tot->utime);
times->stime = cputime_add(times->stime, tot->stime); times->stime = cputime_add(times->stime, tot->stime);
times->sum_exec_runtime += tot->sum_exec_runtime; times->sum_exec_runtime += tot->sum_exec_runtime;
......
...@@ -662,7 +662,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) ...@@ -662,7 +662,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
if (recursion_bug) { if (recursion_bug) {
recursion_bug = 0; recursion_bug = 0;
strcpy(printk_buf, recursion_bug_msg); strcpy(printk_buf, recursion_bug_msg);
printed_len = sizeof(recursion_bug_msg); printed_len = strlen(recursion_bug_msg);
} }
/* Emit the output into the temporary buffer */ /* Emit the output into the temporary buffer */
printed_len += vscnprintf(printk_buf + printed_len, printed_len += vscnprintf(printk_buf + printed_len,
......
...@@ -191,7 +191,7 @@ static void print_other_cpu_stall(struct rcu_ctrlblk *rcp) ...@@ -191,7 +191,7 @@ static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
/* OK, time to rat on our buddy... */ /* OK, time to rat on our buddy... */
printk(KERN_ERR "RCU detected CPU stalls:"); printk(KERN_ERR "INFO: RCU detected CPU stalls:");
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
if (cpu_isset(cpu, rcp->cpumask)) if (cpu_isset(cpu, rcp->cpumask))
printk(" %d", cpu); printk(" %d", cpu);
...@@ -204,7 +204,7 @@ static void print_cpu_stall(struct rcu_ctrlblk *rcp) ...@@ -204,7 +204,7 @@ static void print_cpu_stall(struct rcu_ctrlblk *rcp)
{ {
unsigned long flags; unsigned long flags;
printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu jiffies)\n", printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
smp_processor_id(), jiffies, smp_processor_id(), jiffies,
jiffies - rcp->gp_start); jiffies - rcp->gp_start);
dump_stack(); dump_stack();
......
...@@ -551,6 +551,16 @@ void rcu_irq_exit(void) ...@@ -551,6 +551,16 @@ void rcu_irq_exit(void)
} }
} }
void rcu_nmi_enter(void)
{
rcu_irq_enter();
}
void rcu_nmi_exit(void)
{
rcu_irq_exit();
}
static void dyntick_save_progress_counter(int cpu) static void dyntick_save_progress_counter(int cpu)
{ {
struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
......
...@@ -149,12 +149,12 @@ static void rcupreempt_trace_sum(struct rcupreempt_trace *sp) ...@@ -149,12 +149,12 @@ static void rcupreempt_trace_sum(struct rcupreempt_trace *sp)
sp->done_length += cp->done_length; sp->done_length += cp->done_length;
sp->done_add += cp->done_add; sp->done_add += cp->done_add;
sp->done_remove += cp->done_remove; sp->done_remove += cp->done_remove;
atomic_set(&sp->done_invoked, atomic_read(&cp->done_invoked)); atomic_add(atomic_read(&cp->done_invoked), &sp->done_invoked);
sp->rcu_check_callbacks += cp->rcu_check_callbacks; sp->rcu_check_callbacks += cp->rcu_check_callbacks;
atomic_set(&sp->rcu_try_flip_1, atomic_add(atomic_read(&cp->rcu_try_flip_1),
atomic_read(&cp->rcu_try_flip_1)); &sp->rcu_try_flip_1);
atomic_set(&sp->rcu_try_flip_e1, atomic_add(atomic_read(&cp->rcu_try_flip_e1),
atomic_read(&cp->rcu_try_flip_e1)); &sp->rcu_try_flip_e1);
sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1; sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1;
sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1; sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1;
sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1; sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1;
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h> #include <linux/freezer.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/delay.h> #include <linux/delay.h>
...@@ -108,7 +109,6 @@ struct rcu_torture { ...@@ -108,7 +109,6 @@ struct rcu_torture {
int rtort_mbtest; int rtort_mbtest;
}; };
static int fullstop = 0; /* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist); static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL; static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0; static long rcu_torture_current_version = 0;
...@@ -136,6 +136,30 @@ static int stutter_pause_test = 0; ...@@ -136,6 +136,30 @@ static int stutter_pause_test = 0;
#endif #endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
#define FULLSTOP_SIGNALED 1 /* Bail due to signal. */
#define FULLSTOP_CLEANUP 2 /* Orderly shutdown. */
static int fullstop; /* stop generating callbacks at test end. */
DEFINE_MUTEX(fullstop_mutex); /* protect fullstop transitions and */
/* spawning of kthreads. */
/*
* Detect and respond to a signal-based shutdown.
*/
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
unsigned long unused2, void *unused3)
{
if (fullstop)
return NOTIFY_DONE;
if (signal_pending(current)) {
mutex_lock(&fullstop_mutex);
if (!ACCESS_ONCE(fullstop))
fullstop = FULLSTOP_SIGNALED;
mutex_unlock(&fullstop_mutex);
}
return NOTIFY_DONE;
}
/* /*
* Allocate an element from the rcu_tortures pool. * Allocate an element from the rcu_tortures pool.
*/ */
...@@ -199,11 +223,12 @@ rcu_random(struct rcu_random_state *rrsp) ...@@ -199,11 +223,12 @@ rcu_random(struct rcu_random_state *rrsp)
static void static void
rcu_stutter_wait(void) rcu_stutter_wait(void)
{ {
while (stutter_pause_test || !rcutorture_runnable) while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
if (rcutorture_runnable) if (rcutorture_runnable)
schedule_timeout_interruptible(1); schedule_timeout_interruptible(1);
else else
schedule_timeout_interruptible(round_jiffies_relative(HZ)); schedule_timeout_interruptible(round_jiffies_relative(HZ));
}
} }
/* /*
...@@ -599,7 +624,7 @@ rcu_torture_writer(void *arg) ...@@ -599,7 +624,7 @@ rcu_torture_writer(void *arg)
rcu_stutter_wait(); rcu_stutter_wait();
} while (!kthread_should_stop() && !fullstop); } while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
while (!kthread_should_stop()) while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
schedule_timeout_uninterruptible(1); schedule_timeout_uninterruptible(1);
return 0; return 0;
} }
...@@ -624,7 +649,7 @@ rcu_torture_fakewriter(void *arg) ...@@ -624,7 +649,7 @@ rcu_torture_fakewriter(void *arg)
} while (!kthread_should_stop() && !fullstop); } while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
while (!kthread_should_stop()) while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
schedule_timeout_uninterruptible(1); schedule_timeout_uninterruptible(1);
return 0; return 0;
} }
...@@ -734,7 +759,7 @@ rcu_torture_reader(void *arg) ...@@ -734,7 +759,7 @@ rcu_torture_reader(void *arg)
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
if (irqreader && cur_ops->irqcapable) if (irqreader && cur_ops->irqcapable)
del_timer_sync(&t); del_timer_sync(&t);
while (!kthread_should_stop()) while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
schedule_timeout_uninterruptible(1); schedule_timeout_uninterruptible(1);
return 0; return 0;
} }
...@@ -831,7 +856,7 @@ rcu_torture_stats(void *arg) ...@@ -831,7 +856,7 @@ rcu_torture_stats(void *arg)
do { do {
schedule_timeout_interruptible(stat_interval * HZ); schedule_timeout_interruptible(stat_interval * HZ);
rcu_torture_stats_print(); rcu_torture_stats_print();
} while (!kthread_should_stop()); } while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping"); VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
return 0; return 0;
} }
...@@ -899,7 +924,7 @@ rcu_torture_shuffle(void *arg) ...@@ -899,7 +924,7 @@ rcu_torture_shuffle(void *arg)
do { do {
schedule_timeout_interruptible(shuffle_interval * HZ); schedule_timeout_interruptible(shuffle_interval * HZ);
rcu_torture_shuffle_tasks(); rcu_torture_shuffle_tasks();
} while (!kthread_should_stop()); } while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping"); VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
return 0; return 0;
} }
...@@ -914,10 +939,10 @@ rcu_torture_stutter(void *arg) ...@@ -914,10 +939,10 @@ rcu_torture_stutter(void *arg)
do { do {
schedule_timeout_interruptible(stutter * HZ); schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 1; stutter_pause_test = 1;
if (!kthread_should_stop()) if (!kthread_should_stop() && !fullstop)
schedule_timeout_interruptible(stutter * HZ); schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 0; stutter_pause_test = 0;
} while (!kthread_should_stop()); } while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping"); VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
return 0; return 0;
} }
...@@ -934,12 +959,27 @@ rcu_torture_print_module_parms(char *tag) ...@@ -934,12 +959,27 @@ rcu_torture_print_module_parms(char *tag)
stutter, irqreader); stutter, irqreader);
} }
static struct notifier_block rcutorture_nb = {
.notifier_call = rcutorture_shutdown_notify,
};
static void static void
rcu_torture_cleanup(void) rcu_torture_cleanup(void)
{ {
int i; int i;
fullstop = 1; mutex_lock(&fullstop_mutex);
if (!fullstop) {
/* If being signaled, let it happen, then exit. */
mutex_unlock(&fullstop_mutex);
schedule_timeout_interruptible(10 * HZ);
if (cur_ops->cb_barrier != NULL)
cur_ops->cb_barrier();
return;
}
fullstop = FULLSTOP_CLEANUP;
mutex_unlock(&fullstop_mutex);
unregister_reboot_notifier(&rcutorture_nb);
if (stutter_task) { if (stutter_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task"); VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
kthread_stop(stutter_task); kthread_stop(stutter_task);
...@@ -1015,6 +1055,8 @@ rcu_torture_init(void) ...@@ -1015,6 +1055,8 @@ rcu_torture_init(void)
{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
&srcu_ops, &sched_ops, &sched_ops_sync, }; &srcu_ops, &sched_ops, &sched_ops_sync, };
mutex_lock(&fullstop_mutex);
/* Process args and tell the world that the torturer is on the job. */ /* Process args and tell the world that the torturer is on the job. */
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
cur_ops = torture_ops[i]; cur_ops = torture_ops[i];
...@@ -1024,6 +1066,7 @@ rcu_torture_init(void) ...@@ -1024,6 +1066,7 @@ rcu_torture_init(void)
if (i == ARRAY_SIZE(torture_ops)) { if (i == ARRAY_SIZE(torture_ops)) {
printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
torture_type); torture_type);
mutex_unlock(&fullstop_mutex);
return (-EINVAL); return (-EINVAL);
} }
if (cur_ops->init) if (cur_ops->init)
...@@ -1146,9 +1189,12 @@ rcu_torture_init(void) ...@@ -1146,9 +1189,12 @@ rcu_torture_init(void)
goto unwind; goto unwind;
} }
} }
register_reboot_notifier(&rcutorture_nb);
mutex_unlock(&fullstop_mutex);
return 0; return 0;
unwind: unwind:
mutex_unlock(&fullstop_mutex);
rcu_torture_cleanup(); rcu_torture_cleanup();
return firsterr; return firsterr;
} }
......
This diff is collapsed.
/*
 * Read-Copy Update tracing for hierarchical implementation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright IBM Corporation, 2008
*
* Papers: http://www.rdrop.com/users/paulmck/RCU
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
*
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
/*
 * Dump one CPU's rcu_data in human-readable form.  CPUs that have never
 * been online are skipped entirely; currently-offline CPUs are flagged
 * with a '!' after the CPU number.
 */
static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
{
	if (!rdp->beenonline)
		return;
	/*
	 * c/g: last completed vs. current grace-period number; pq/pqc:
	 * quiescent state passed and the period it counted for; qp:
	 * quiescent state still pending for the current period.
	 */
	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x",
		   rdp->cpu,
		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
		   rdp->completed, rdp->gpnum,
		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
		   rdp->qs_pending,
		   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
		   (int)(rdp->n_rcu_pending & 0xffff));
#ifdef CONFIG_NO_HZ
	/* Dynticks state exists only when the tick can be stopped. */
	seq_printf(m, " dt=%d/%d dn=%d df=%lu",
		   rdp->dynticks->dynticks,
		   rdp->dynticks->dynticks_nesting,
		   rdp->dynticks->dynticks_nmi,
		   rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
	seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
	seq_printf(m, " ql=%ld b=%ld\n", rdp->qlen, rdp->blimit);
}
/*
 * Invoke func(m, &per_cpu(name, cpu)) for every possible CPU's instance
 * of the per-CPU variable 'name'.  This must be a macro rather than a
 * function because 'name' is a per-CPU variable identifier, not a value.
 */
#define PRINT_RCU_DATA(name, func, m) \
	do { \
		int _p_r_d_i; \
		\
		for_each_possible_cpu(_p_r_d_i) \
			func(m, &per_cpu(name, _p_r_d_i)); \
	} while (0)
/*
 * seq_file show routine for the "rcudata" debugfs file: dump every
 * CPU's rcu_data for both the rcu and rcu_bh flavors.
 */
static int show_rcudata(struct seq_file *m, void *unused)
{
	seq_puts(m, "rcu:\n");
	PRINT_RCU_DATA(rcu_data, print_one_rcu_data, m);
	seq_puts(m, "rcu_bh:\n");
	PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
	return 0;
}
static int rcudata_open(struct inode *inode, struct file *file)
{
return single_open(file, show_rcudata, NULL);
}
static struct file_operations rcudata_fops = {
.owner = THIS_MODULE,
.open = rcudata_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
 * Emit one CPU's rcu_data as a CSV row.  The field order must stay in
 * sync with the header row written by show_rcudata_csv().  CPUs that
 * have never been online are skipped.
 */
static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
{
	if (!rdp->beenonline)
		return;
	seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld",
		   rdp->cpu,
		   cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"",
		   rdp->completed, rdp->gpnum,
		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
		   rdp->qs_pending,
		   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
		   rdp->n_rcu_pending);
#ifdef CONFIG_NO_HZ
	/* Dynticks columns exist only when the tick can be stopped. */
	seq_printf(m, ",%d,%d,%d,%lu",
		   rdp->dynticks->dynticks,
		   rdp->dynticks->dynticks_nesting,
		   rdp->dynticks->dynticks_nmi,
		   rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
	seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
	seq_printf(m, ",%ld,%ld\n", rdp->qlen, rdp->blimit);
}
/*
 * seq_file show routine for "rcudata.csv": the same per-CPU data as
 * "rcudata" but in comma-separated form, preceded by a header row.
 */
static int show_rcudata_csv(struct seq_file *m, void *unused)
{
	/*
	 * Column labels must match the field order emitted by
	 * print_one_rcu_data_csv().  The seventh data column is
	 * qs_pending, labeled "qp" in the human-readable output, so
	 * label it "qp" here too (it was mislabeled as a second "pq").
	 */
	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"qp\",\"rpfq\",\"rp\",");
#ifdef CONFIG_NO_HZ
	seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
#endif /* #ifdef CONFIG_NO_HZ */
	seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n");
	seq_puts(m, "\"rcu:\"\n");
	PRINT_RCU_DATA(rcu_data, print_one_rcu_data_csv, m);
	seq_puts(m, "\"rcu_bh:\"\n");
	PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
	return 0;
}
static int rcudata_csv_open(struct inode *inode, struct file *file)
{
return single_open(file, show_rcudata_csv, NULL);
}
static struct file_operations rcudata_csv_fops = {
.owner = THIS_MODULE,
.open = rcudata_csv_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
 * Dump one rcu_state: a summary line of grace-period and
 * force-quiescent-state statistics, followed by each rcu_node's
 * quiescent-state masks and CPU/group spans, with a line break whenever
 * the hierarchy level changes.
 */
static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
{
	int level = 0;
	struct rcu_node *rnp;

	seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
		   "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n",
		   rsp->completed, rsp->gpnum, rsp->signaled,
		   (long)(rsp->jiffies_force_qs - jiffies),
		   (int)(jiffies & 0xffff),
		   rsp->n_force_qs, rsp->n_force_qs_ngp,
		   rsp->n_force_qs - rsp->n_force_qs_ngp,
		   rsp->n_force_qs_lh);
	/* Nodes are stored level by level, so this walks the tree top-down. */
	for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
		/* Start a new output line at each new level of the tree. */
		if (rnp->level != level) {
			seq_puts(m, "\n");
			level = rnp->level;
		}
		seq_printf(m, "%lx/%lx %d:%d ^%d ",
			   rnp->qsmask, rnp->qsmaskinit,
			   rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	seq_puts(m, "\n");
}
static int show_rcuhier(struct seq_file *m, void *unused)
{
seq_puts(m, "rcu:\n");
print_one_rcu_state(m, &rcu_state);
seq_puts(m, "rcu_bh:\n");
print_one_rcu_state(m, &rcu_bh_state);
return 0;
}
static int rcuhier_open(struct inode *inode, struct file *file)
{
return single_open(file, show_rcuhier, NULL);
}
static struct file_operations rcuhier_fops = {
.owner = THIS_MODULE,
.open = rcuhier_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int show_rcugp(struct seq_file *m, void *unused)
{
seq_printf(m, "rcu: completed=%ld gpnum=%ld\n",
rcu_state.completed, rcu_state.gpnum);
seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n",
rcu_bh_state.completed, rcu_bh_state.gpnum);
return 0;
}
static int rcugp_open(struct inode *inode, struct file *file)
{
return single_open(file, show_rcugp, NULL);
}
static struct file_operations rcugp_fops = {
.owner = THIS_MODULE,
.open = rcugp_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *rcudir, *datadir, *datadir_csv, *hierdir, *gpdir;
/*
 * Module init: create the "rcu" debugfs directory and its trace files.
 * On any failure, remove whatever was created and return nonzero so the
 * module load is rejected.
 */
static int __init rcuclassic_trace_init(void)
{
	rcudir = debugfs_create_dir("rcu", NULL);
	if (!rcudir)
		goto out;

	datadir = debugfs_create_file("rcudata", 0444, rcudir,
						NULL, &rcudata_fops);
	if (!datadir)
		goto free_out;

	datadir_csv = debugfs_create_file("rcudata.csv", 0444, rcudir,
						NULL, &rcudata_csv_fops);
	if (!datadir_csv)
		goto free_out;

	gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
	if (!gpdir)
		goto free_out;

	hierdir = debugfs_create_file("rcuhier", 0444, rcudir,
						NULL, &rcuhier_fops);
	if (!hierdir)
		goto free_out;
	return 0;
free_out:
	/*
	 * debugfs_remove() is a no-op on a NULL dentry, so the dentries
	 * need not be tested individually before removal.  The file
	 * dentries must be removed before their parent directory.
	 */
	debugfs_remove(datadir);
	debugfs_remove(datadir_csv);
	debugfs_remove(gpdir);
	debugfs_remove(rcudir);
out:
	return 1;
}
/*
 * Module exit: remove the trace files and then the "rcu" directory.
 * The file dentries must go before their parent directory.
 */
static void __exit rcuclassic_trace_cleanup(void)
{
	debugfs_remove(datadir);
	debugfs_remove(datadir_csv);
	debugfs_remove(gpdir);
	debugfs_remove(hierdir);
	debugfs_remove(rcudir);
}
/* Hook debugfs setup/teardown into module load and unload. */
module_init(rcuclassic_trace_init);
module_exit(rcuclassic_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation");
MODULE_LICENSE("GPL");
...@@ -853,6 +853,15 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size) ...@@ -853,6 +853,15 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
continue; continue;
/*
* if a resource is "BUSY", it's not a hardware resource
* but a driver mapping of such a resource; we don't want
* to warn for those; some drivers legitimately map only
* partial hardware resources. (example: vesafb)
*/
if (p->flags & IORESOURCE_BUSY)
continue;
printk(KERN_WARNING "resource map sanity check conflict: " printk(KERN_WARNING "resource map sanity check conflict: "
"0x%llx 0x%llx 0x%llx 0x%llx %s\n", "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
(unsigned long long)addr, (unsigned long long)addr,
......
...@@ -4192,7 +4192,6 @@ void account_steal_time(struct task_struct *p, cputime_t steal) ...@@ -4192,7 +4192,6 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
if (p == rq->idle) { if (p == rq->idle) {
p->stime = cputime_add(p->stime, steal); p->stime = cputime_add(p->stime, steal);
account_group_system_time(p, steal);
if (atomic_read(&rq->nr_iowait) > 0) if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp); cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else else
...@@ -4328,7 +4327,7 @@ void __kprobes sub_preempt_count(int val) ...@@ -4328,7 +4327,7 @@ void __kprobes sub_preempt_count(int val)
/* /*
* Underflow? * Underflow?
*/ */
if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
return; return;
/* /*
* Is the spinlock portion underflowing? * Is the spinlock portion underflowing?
......
...@@ -102,20 +102,6 @@ void local_bh_disable(void) ...@@ -102,20 +102,6 @@ void local_bh_disable(void)
EXPORT_SYMBOL(local_bh_disable); EXPORT_SYMBOL(local_bh_disable);
void __local_bh_enable(void)
{
WARN_ON_ONCE(in_irq());
/*
* softirqs should never be enabled by __local_bh_enable(),
* it always nests inside local_bh_enable() sections:
*/
WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL_GPL(__local_bh_enable);
/* /*
* Special-case - softirqs can safely be enabled in * Special-case - softirqs can safely be enabled in
* cond_resched_softirq(), or by __do_softirq(), * cond_resched_softirq(), or by __do_softirq(),
...@@ -269,6 +255,7 @@ void irq_enter(void) ...@@ -269,6 +255,7 @@ void irq_enter(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
rcu_irq_enter();
if (idle_cpu(cpu) && !in_interrupt()) { if (idle_cpu(cpu) && !in_interrupt()) {
__irq_enter(); __irq_enter();
tick_check_idle(cpu); tick_check_idle(cpu);
...@@ -295,9 +282,9 @@ void irq_exit(void) ...@@ -295,9 +282,9 @@ void irq_exit(void)
#ifdef CONFIG_NO_HZ #ifdef CONFIG_NO_HZ
/* Make sure that timer wheel updates are propagated */ /* Make sure that timer wheel updates are propagated */
if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
tick_nohz_stop_sched_tick(0);
rcu_irq_exit(); rcu_irq_exit();
if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
tick_nohz_stop_sched_tick(0);
#endif #endif
preempt_enable_no_resched(); preempt_enable_no_resched();
} }
......
...@@ -164,7 +164,7 @@ unsigned long __read_mostly sysctl_hung_task_check_count = 1024; ...@@ -164,7 +164,7 @@ unsigned long __read_mostly sysctl_hung_task_check_count = 1024;
/* /*
* Zero means infinite timeout - no checking done: * Zero means infinite timeout - no checking done:
*/ */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; unsigned long __read_mostly sysctl_hung_task_timeout_secs = 480;
unsigned long __read_mostly sysctl_hung_task_warnings = 10; unsigned long __read_mostly sysctl_hung_task_warnings = 10;
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/ */
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/kallsyms.h> #include <linux/kallsyms.h>
#include <linux/stacktrace.h> #include <linux/stacktrace.h>
...@@ -24,3 +25,13 @@ void print_stack_trace(struct stack_trace *trace, int spaces) ...@@ -24,3 +25,13 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
} }
EXPORT_SYMBOL_GPL(print_stack_trace); EXPORT_SYMBOL_GPL(print_stack_trace);
/*
* Architectures that do not implement save_stack_trace_tsk get this
* weak alias and a once-per-bootup warning (whenever this facility
* is utilized - for example by procfs):
*/
__weak void
save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
}
...@@ -907,8 +907,8 @@ void do_sys_times(struct tms *tms) ...@@ -907,8 +907,8 @@ void do_sys_times(struct tms *tms)
struct task_cputime cputime; struct task_cputime cputime;
cputime_t cutime, cstime; cputime_t cutime, cstime;
spin_lock_irq(&current->sighand->siglock);
thread_group_cputime(current, &cputime); thread_group_cputime(current, &cputime);
spin_lock_irq(&current->sighand->siglock);
cutime = current->signal->cutime; cutime = current->signal->cutime;
cstime = current->signal->cstime; cstime = current->signal->cstime;
spin_unlock_irq(&current->sighand->siglock); spin_unlock_irq(&current->sighand->siglock);
......
...@@ -252,6 +252,14 @@ config DEBUG_OBJECTS_TIMERS ...@@ -252,6 +252,14 @@ config DEBUG_OBJECTS_TIMERS
timer routines to track the life time of timer objects and timer routines to track the life time of timer objects and
validate the timer operations. validate the timer operations.
config DEBUG_OBJECTS_ENABLE_DEFAULT
int "debug_objects bootup default value (0-1)"
range 0 1
default "1"
depends on DEBUG_OBJECTS
help
Debug objects boot parameter default value
config DEBUG_SLAB config DEBUG_SLAB
bool "Debug slab memory allocations" bool "Debug slab memory allocations"
depends on DEBUG_KERNEL && SLAB depends on DEBUG_KERNEL && SLAB
...@@ -545,6 +553,16 @@ config DEBUG_SG ...@@ -545,6 +553,16 @@ config DEBUG_SG
If unsure, say N. If unsure, say N.
config DEBUG_NOTIFIERS
bool "Debug notifier call chains"
depends on DEBUG_KERNEL
help
Enable this to turn on sanity checking for notifier call chains.
This is most useful for kernel developers to make sure that
modules properly unregister themselves from notifier chains.
This is a relatively cheap check but if you care about maximum
performance, say N.
config FRAME_POINTER config FRAME_POINTER
bool "Compile the kernel with frame pointers" bool "Compile the kernel with frame pointers"
depends on DEBUG_KERNEL && \ depends on DEBUG_KERNEL && \
...@@ -619,6 +637,19 @@ config RCU_CPU_STALL_DETECTOR ...@@ -619,6 +637,19 @@ config RCU_CPU_STALL_DETECTOR
Say N if you are unsure. Say N if you are unsure.
config RCU_CPU_STALL_DETECTOR
bool "Check for stalled CPUs delaying RCU grace periods"
depends on CLASSIC_RCU || TREE_RCU
default n
help
This option causes RCU to printk information on which
CPUs are delaying the current grace period, but only when
the grace period extends for excessive time periods.
Say Y if you want RCU to perform such checks.
Say N if you are unsure.
config KPROBES_SANITY_TEST config KPROBES_SANITY_TEST
bool "Kprobes sanity tests" bool "Kprobes sanity tests"
depends on DEBUG_KERNEL depends on DEBUG_KERNEL
......
...@@ -45,7 +45,9 @@ static struct kmem_cache *obj_cache; ...@@ -45,7 +45,9 @@ static struct kmem_cache *obj_cache;
static int debug_objects_maxchain __read_mostly; static int debug_objects_maxchain __read_mostly;
static int debug_objects_fixups __read_mostly; static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly; static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly; static int debug_objects_enabled __read_mostly
= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static struct debug_obj_descr *descr_test __read_mostly; static struct debug_obj_descr *descr_test __read_mostly;
static int __init enable_object_debug(char *str) static int __init enable_object_debug(char *str)
......
This diff is collapsed.
...@@ -3075,3 +3075,18 @@ void print_vma_addr(char *prefix, unsigned long ip) ...@@ -3075,3 +3075,18 @@ void print_vma_addr(char *prefix, unsigned long ip)
} }
up_read(&current->mm->mmap_sem); up_read(&current->mm->mmap_sem);
} }
#ifdef CONFIG_PROVE_LOCKING
void might_fault(void)
{
might_sleep();
/*
* it would be nicer only to annotate paths which are not under
* pagefault_disable, however that requires a larger audit and
* providing helpers like get_user_atomic.
*/
if (!in_atomic() && current->mm)
might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment