Commit 8d1a2408 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc fixes from David Miller:

 1) With modern networking cards we can run out of 32-bit DMA space, so
    support 64-bit DMA addressing when possible on sparc64 (a driver-side
    sketch follows the list). From Tushar Dave.

 2) Some signal frame validation checks are inverted on sparc32, fix
    from Andreas Larsson.

 3) Lockdep tables can get too large in some circumstances on sparc64,
    add a way to adjust the size a bit. From Babu Moger.

 4) Fix NUMA node probing on some sun4v systems, from Thomas Tai.
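
A driver-side sketch of how a device opts into the wider addressing
(hypothetical probe snippet, not part of this series; dma_set_mask_and_coherent()
is the standard kernel API, and the patched dma_supported() below is what
accepts or rejects the mask):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Hypothetical: request 64-bit DMA, fall back to 32-bit if refused. */
	static int example_setup_dma(struct pci_dev *pdev)
	{
		if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
			return 0;	/* sun4v ATU can honor the 64-bit mask */
		return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	}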

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc: drop duplicate header scatterlist.h
  lockdep: Limit static allocations if PROVE_LOCKING_SMALL is defined
  config: Adding the new config parameter CONFIG_PROVE_LOCKING_SMALL for sparc
  sunbmac: Fix compiler warning
  sunqe: Fix compiler warnings
  sparc64: Enable 64-bit DMA
  sparc64: Enable sun4v dma ops to use IOMMU v2 APIs
  sparc64: Bind PCIe devices to use IOMMU v2 service
  sparc64: Initialize iommu_map_table and iommu_pool
  sparc64: Add ATU (new IOMMU) support
  sparc64: Add FORCE_MAX_ZONEORDER and default to 13
  sparc64: fix compile warning section mismatch in find_node()
  sparc32: Fix inverted invalid_frame_pointer checks on sigreturns
  sparc64: Fix find_node warning if numa node cannot be found
parents 27e7ab99 9dd35d68
@@ -43,6 +43,7 @@ config SPARC
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
 	select HAVE_ARCH_HARDENED_USERCOPY
+	select PROVE_LOCKING_SMALL if PROVE_LOCKING
 
 config SPARC32
 	def_bool !64BIT
@@ -89,6 +90,14 @@ config ARCH_DEFCONFIG
 config ARCH_PROC_KCORE_TEXT
 	def_bool y
 
+config ARCH_ATU
+	bool
+	default y if SPARC64
+
+config ARCH_DMA_ADDR_T_64BIT
+	bool
+	default y if ARCH_ATU
+
 config IOMMU_HELPER
 	bool
 	default y if SPARC64
@@ -304,6 +313,20 @@ config ARCH_SPARSEMEM_ENABLE
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y if SPARC64
 
+config FORCE_MAX_ZONEORDER
+	int "Maximum zone order"
+	default "13"
+	help
+	  The kernel memory allocator divides physically contiguous memory
+	  blocks into "zones", where each zone is a power of two number of
+	  pages.  This option selects the largest power of two that the kernel
+	  keeps in the memory allocator.  If you need to allocate very large
+	  blocks of physically contiguous memory, then you may need to
+	  increase this value.
+
+	  This config option is actually maximum order plus one. For example,
+	  a value of 13 means that the largest free memory block is 2^12 pages.
+
 source "mm/Kconfig"
 
 if SPARC64
...
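
As a quick check of that help text's arithmetic: sparc64 uses 8 KiB base pages,
so the default of 13 caps the buddy allocator at 2^12 = 4096 contiguous pages,
i.e. 4096 * 8 KiB = 32 MiB per allocation.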
...
@@ -24,8 +24,36 @@ struct iommu_arena {
 	unsigned int	limit;
 };
 
+#define ATU_64_SPACE_SIZE 0x800000000 /* 32G */
+
+/* Data structures for SPARC ATU architecture */
+struct atu_iotsb {
+	void	*table;		/* IOTSB table base virtual addr */
+	u64	ra;		/* IOTSB table real addr */
+	u64	dvma_size;	/* ranges[3].size or OS selected 32G size */
+	u64	dvma_base;	/* ranges[3].base */
+	u64	table_size;	/* IOTSB table size */
+	u64	page_size;	/* IO PAGE size for IOTSB */
+	u32	iotsb_num;	/* tsbnum is same as iotsb_handle */
+};
+
+struct atu_ranges {
+	u64	base;
+	u64	size;
+};
+
+struct atu {
+	struct atu_ranges	*ranges;
+	struct atu_iotsb	*iotsb;
+	struct iommu_map_table	tbl;
+	u64			base;
+	u64			size;
+	u64			dma_addr_mask;
+};
+
 struct iommu {
 	struct iommu_map_table	tbl;
+	struct atu		*atu;
 	spinlock_t		lock;
 	u32			dma_addr_mask;
 	iopte_t			*page_table;
...
@@ -39,6 +39,7 @@ static struct api_info api_table[] = {
 	{ .group = HV_GRP_SDIO,				},
 	{ .group = HV_GRP_SDIO_ERR,			},
 	{ .group = HV_GRP_REBOOT_DATA,			},
+	{ .group = HV_GRP_ATU,		.flags = FLAG_PRE_API },
 	{ .group = HV_GRP_NIAG_PERF,	.flags = FLAG_PRE_API },
 	{ .group = HV_GRP_FIRE_PERF,			},
 	{ .group = HV_GRP_N2_CPU,			},
...
@@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask)
 	struct iommu *iommu = dev->archdata.iommu;
 	u64 dma_addr_mask = iommu->dma_addr_mask;
 
-	if (device_mask >= (1UL << 32UL))
-		return 0;
+	if (device_mask > DMA_BIT_MASK(32)) {
+		if (iommu->atu)
+			dma_addr_mask = iommu->atu->dma_addr_mask;
+		else
+			return 0;
+	}
 
 	if ((device_mask & dma_addr_mask) == dma_addr_mask)
 		return 1;
...
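
The rewritten check is easier to follow with the macro expanded. A standalone
sketch (DMA_BIT_MASK reproduced from include/linux/dma-mapping.h; the demo
scaffolding around it is illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	int main(void)
	{
		uint64_t mask32 = DMA_BIT_MASK(32);	/* 0x00000000ffffffff */
		uint64_t mask64 = DMA_BIT_MASK(64);	/* 0xffffffffffffffff */

		/* Only a mask wider than 32 bits takes the new ATU branch. */
		printf("%d\n", mask32 > DMA_BIT_MASK(32));	/* 0: legacy IOMMU path */
		printf("%d\n", mask64 > DMA_BIT_MASK(32));	/* 1: ATU path, or reject */
		return 0;
	}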
@@ -13,7 +13,6 @@
 #include <linux/scatterlist.h>
 #include <linux/device.h>
 #include <linux/iommu-helper.h>
-#include <linux/scatterlist.h>
 #include <asm/iommu.h>
...
...
@@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
 				     unsigned long msinum,
 				     unsigned long valid);
 
+/* Sun4v HV IOMMU v2 APIs */
+unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
+				   unsigned long ra,
+				   unsigned long table_size,
+				   unsigned long page_size,
+				   unsigned long dvma_base,
+				   u64 *iotsb_num);
+unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
+				   unsigned long iotsb_num,
+				   unsigned int pci_device);
+unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
+				  unsigned long iotsb_num,
+				  unsigned long iotsb_index_iottes,
+				  unsigned long io_attributes,
+				  unsigned long io_page_list_pa,
+				  long *mapped);
+unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
+				    unsigned long iotsb_num,
+				    unsigned long iotsb_index,
+				    unsigned long iottes,
+				    unsigned long *demapped);
+
 #endif /* !(_PCI_SUN4V_H) */
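
A minimal, hypothetical call sequence built only from the prototypes above
(the real wiring lives in the collapsed pci_sun4v.c diff; variable names are
illustrative and error handling is trimmed):

	u64 iotsb_num;
	long num_mapped;
	unsigned long err;

	/* 1. Configure an IOTSB; the HV hands back a handle in iotsb_num. */
	err = pci_sun4v_iotsb_conf(devhandle, __pa(iotsb->table),
				   iotsb->table_size, iotsb->page_size,
				   iotsb->dvma_base, &iotsb_num);

	/* 2. Bind the PCIe device to that IOTSB. */
	if (err == HV_EOK)
		err = pci_sun4v_iotsb_bind(devhandle, iotsb_num, pci_device);

	/* 3. Map IOTTEs; iotsb_index_iottes packs start index and count. */
	if (err == HV_EOK)
		err = pci_sun4v_iotsb_map(devhandle, iotsb_num,
					  iotsb_index_iottes, io_attributes,
					  io_page_list_pa, &num_mapped);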
@@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid)
 	 mov	%o0, %o0
 ENDPROC(pci_sun4v_msg_setvalid)
+
+/*
+ * %o0:	devhandle
+ * %o1:	r_addr
+ * %o2:	size
+ * %o3:	pagesize
+ * %o4:	virt
+ * %o5:	&iotsb_num/&iotsb_handle
+ *
+ * returns %o0:	status
+ *         %o1:	iotsb_num/iotsb_handle
+ */
+ENTRY(pci_sun4v_iotsb_conf)
+	mov	%o5, %g1
+	mov	HV_FAST_PCI_IOTSB_CONF, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_conf)
+
+/*
+ * %o0:	devhandle
+ * %o1:	iotsb_num/iotsb_handle
+ * %o2:	pci_device
+ *
+ * returns %o0:	status
+ */
+ENTRY(pci_sun4v_iotsb_bind)
+	mov	HV_FAST_PCI_IOTSB_BIND, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(pci_sun4v_iotsb_bind)
+
+/*
+ * %o0:	devhandle
+ * %o1:	iotsb_num/iotsb_handle
+ * %o2:	index_count
+ * %o3:	iotte_attributes
+ * %o4:	io_page_list_p
+ * %o5:	&mapped
+ *
+ * returns %o0:	status
+ *         %o1:	#mapped
+ */
+ENTRY(pci_sun4v_iotsb_map)
+	mov	%o5, %g1
+	mov	HV_FAST_PCI_IOTSB_MAP, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_map)
+
+/*
+ * %o0:	devhandle
+ * %o1:	iotsb_num/iotsb_handle
+ * %o2:	iotsb_index
+ * %o3:	#iottes
+ * %o4:	&demapped
+ *
+ * returns %o0:	status
+ *         %o1:	#demapped
+ */
+ENTRY(pci_sun4v_iotsb_demap)
+	mov	HV_FAST_PCI_IOTSB_DEMAP, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%o4]
+ENDPROC(pci_sun4v_iotsb_demap)
@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
 	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (!invalid_frame_pointer(sf, sizeof(*sf)))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
 		goto segv_and_exit;
 
 	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 	synchronize_user_stack();
 	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
-	if (!invalid_frame_pointer(sf, sizeof(*sf)))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
 		goto segv;
 
 	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
...
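
The predicate returns nonzero when the frame pointer is bad, so the extra "!"
rejected valid frames and proceeded with garbage ones. Roughly (a simplified
sketch of the sparc32 helper's contract, not the verbatim kernel code):

	static inline int invalid_frame_pointer(void __user *fp, int fplen)
	{
		/* misaligned, or not a readable user range => invalid */
		if ((((unsigned long) fp) & 7) ||
		    !__access_ok((unsigned long) fp, fplen))
			return 1;
		return 0;
	}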
@@ -802,8 +802,10 @@ struct mdesc_mblock {
 };
 static struct mdesc_mblock *mblocks;
 static int num_mblocks;
+static int find_numa_node_for_addr(unsigned long pa,
+				   struct node_mem_mask *pnode_mask);
 
-static unsigned long ra_to_pa(unsigned long addr)
+static unsigned long __init ra_to_pa(unsigned long addr)
 {
 	int i;
 
@@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr)
 	return addr;
 }
 
-static int find_node(unsigned long addr)
+static int __init find_node(unsigned long addr)
 {
+	static bool search_mdesc = true;
+	static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
+	static int last_index;
 	int i;
 
 	addr = ra_to_pa(addr);
@@ -830,13 +835,30 @@ static int find_node(unsigned long addr)
 		if ((addr & p->mask) == p->val)
 			return i;
 	}
-	/* The following condition has been observed on LDOM guests.*/
-	WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
-		" rule. Some physical memory will be owned by node 0.");
-	return 0;
+	/* The following condition has been observed on LDOM guests because
+	 * node_masks only contains the best latency mask and value.
+	 * LDOM guest's mdesc can contain a single latency group to
+	 * cover multiple address range. Print warning message only if the
+	 * address cannot be found in node_masks nor mdesc.
+	 */
+	if ((search_mdesc) &&
+	    ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
+		/* find the available node in the mdesc */
+		last_index = find_numa_node_for_addr(addr, &last_mem_mask);
+		numadbg("find_node: latency group for address 0x%lx is %d\n",
+			addr, last_index);
+		if ((last_index < 0) || (last_index >= num_node_masks)) {
+			/* WARN_ONCE() and use default group 0 */
+			WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
+			search_mdesc = false;
+			last_index = 0;
+		}
+	}
+
+	return last_index;
 }
 
-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
 {
 	*nid = find_node(start);
 	start += PAGE_SIZE;
@@ -1160,6 +1182,41 @@ int __node_distance(int from, int to)
 	return numa_latency[from][to];
 }
 
+static int find_numa_node_for_addr(unsigned long pa,
+				   struct node_mem_mask *pnode_mask)
+{
+	struct mdesc_handle *md = mdesc_grab();
+	u64 node, arc;
+	int i = 0;
+
+	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
+	if (node == MDESC_NODE_NULL)
+		goto out;
+
+	mdesc_for_each_node_by_name(md, node, "group") {
+		mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
+			u64 target = mdesc_arc_target(md, arc);
+			struct mdesc_mlgroup *m = find_mlgroup(target);
+
+			if (!m)
+				continue;
+			if ((pa & m->mask) == m->match) {
+				if (pnode_mask) {
+					pnode_mask->mask = m->mask;
+					pnode_mask->val = m->match;
+				}
+				mdesc_release(md);
+				return i;
+			}
+		}
+		i++;
+	}
+
+out:
+	mdesc_release(md);
+	return -1;
+}
+
 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
 	int i;
...
@@ -623,6 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
 	void __iomem *gregs = bp->gregs;
 	void __iomem *cregs = bp->creg;
 	void __iomem *bregs = bp->bregs;
+	__u32 bblk_dvma = (__u32)bp->bblock_dvma;
 	unsigned char *e = &bp->dev->dev_addr[0];
 
 	/* Latch current counters into statistics. */
@@ -671,9 +672,9 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
 		    bregs + BMAC_XIFCFG);
 
 	/* Tell the QEC where the ring descriptors are. */
-	sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
+	sbus_writel(bblk_dvma + bib_offset(be_rxd, 0),
 		    cregs + CREG_RXDS);
-	sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
+	sbus_writel(bblk_dvma + bib_offset(be_txd, 0),
 		    cregs + CREG_TXDS);
 
 	/* Setup the FIFO pointers into QEC local memory. */
...
@@ -291,7 +291,7 @@ struct bigmac {
 	void __iomem	*bregs;		/* BigMAC Registers */
 	void __iomem	*tregs;		/* BigMAC Transceiver */
 	struct bmac_init_block	*bmac_block;	/* RX and TX descriptors */
-	__u32		bblock_dvma;		/* RX and TX descriptors */
+	dma_addr_t	bblock_dvma;		/* RX and TX descriptors */
 
 	spinlock_t	lock;
...
@@ -124,7 +124,7 @@ static void qe_init_rings(struct sunqe *qep)
 {
 	struct qe_init_block *qb = qep->qe_block;
 	struct sunqe_buffers *qbufs = qep->buffers;
-	__u32 qbufs_dvma = qep->buffers_dvma;
+	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
 	int i;
 
 	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
@@ -144,6 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq)
 	void __iomem *mregs = qep->mregs;
 	void __iomem *gregs = qecp->gregs;
 	unsigned char *e = &qep->dev->dev_addr[0];
+	__u32 qblk_dvma = (__u32)qep->qblock_dvma;
 	u32 tmp;
 	int i;
 
@@ -152,8 +153,8 @@ static int qe_init(struct sunqe *qep, int from_irq)
 		return -EAGAIN;
 
 	/* Setup initial rx/tx init block pointers. */
-	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
-	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
+	sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
+	sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
 
 	/* Enable/mask the various irq's. */
 	sbus_writel(0, cregs + CREG_RIMASK);
@@ -413,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
 	struct net_device *dev = qep->dev;
 	struct qe_rxd *this;
 	struct sunqe_buffers *qbufs = qep->buffers;
-	__u32 qbufs_dvma = qep->buffers_dvma;
+	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
 	int elem = qep->rx_new;
 	u32 flags;
 
@@ -572,7 +573,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct sunqe *qep = netdev_priv(dev);
 	struct sunqe_buffers *qbufs = qep->buffers;
-	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
+	__u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
 	unsigned char *txbuf;
 	int len, entry;
...
@@ -334,12 +334,12 @@ struct sunqe {
 	void __iomem		*qcregs;	/* QEC per-channel Registers */
 	void __iomem		*mregs;		/* Per-channel MACE Registers */
 	struct qe_init_block	*qe_block;	/* RX and TX descriptors */
-	__u32			qblock_dvma;	/* RX and TX descriptors */
+	dma_addr_t		qblock_dvma;	/* RX and TX descriptors */
 	spinlock_t		lock;		/* Protects txfull state */
 	int			rx_new, rx_old;	/* RX ring extents */
 	int			tx_new, tx_old;	/* TX ring extents */
 	struct sunqe_buffers	*buffers;	/* CPU visible address. */
-	__u32			buffers_dvma;	/* DVMA visible address. */
+	dma_addr_t		buffers_dvma;	/* DVMA visible address. */
 	struct sunqec		*parent;
 	u8			mconfig;	/* Base MACE mconfig value */
 	struct platform_device	*op;		/* QE's OF device struct */
...
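
Why the casts above fix the warnings: once ARCH_DMA_ADDR_T_64BIT is selected
(see the Kconfig hunk earlier), dma_addr_t becomes a 64-bit type, while the
QEC/BigMAC descriptor registers are programmed with sbus_writel(), which takes
a u32. These old SBUS parts only ever see 32-bit DVMA handles, so the explicit
truncation is harmless for this hardware and silences the compiler. A sketch:

	dma_addr_t handle = qep->qblock_dvma;		/* now 64 bits wide */
	sbus_writel((__u32)handle + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);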
@@ -45,6 +45,14 @@ enum {
 #define LOCKF_USED_IN_IRQ_READ \
 		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
 
+/*
+ * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
+ * .data and .bss to fit in required 32MB limit for the kernel. With
+ * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
+ * So, reduce the static allocations for lockdep related structures so that
+ * everything fits in current required size limit.
+ */
+#ifdef CONFIG_PROVE_LOCKING_SMALL
 /*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
@@ -54,18 +62,24 @@ enum {
  * table (if it's not there yet), and we check it for lock order
  * conflicts and deadlocks.
  */
+#define MAX_LOCKDEP_ENTRIES	16384UL
+#define MAX_LOCKDEP_CHAINS_BITS	15
+#define MAX_STACK_TRACE_ENTRIES	262144UL
+#else
 #define MAX_LOCKDEP_ENTRIES	32768UL
 
 #define MAX_LOCKDEP_CHAINS_BITS	16
-#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
-
-#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
 
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
  */
 #define MAX_STACK_TRACE_ENTRIES	524288UL
+#endif
+
+#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
+
+#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
 
 extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
...
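
Rough arithmetic on the savings (estimated from the element types of lockdep's
static arrays, not measured): stack_trace[] holds unsigned longs, so going from
524288 to 262144 entries cuts 4 MiB to 2 MiB on a 64-bit kernel; halving
MAX_LOCKDEP_ENTRIES trims list_entries[] of struct lock_list similarly; and
dropping MAX_LOCKDEP_CHAINS_BITS from 16 to 15 halves lock_chains[] plus the
u16 chain_hlocks[] array that scales with it (*5). Together that recovers a few
megabytes of .bss against the 32MB sparc image budget described in the comment.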
@@ -1085,6 +1085,9 @@ config PROVE_LOCKING
 
 	 For more details, see Documentation/locking/lockdep-design.txt.
 
+config PROVE_LOCKING_SMALL
+	bool
+
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
...