Commit fbf71762 authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc fixes from Benjamin Herrenschmidt:
 "Here are a handful more fixes for powerpc.  The irq stuff are all
  regression fixes, and Gavin's patch is a simple compile fix."

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  tty/serial/pmac_zilog: Fix "nobody cared" IRQ message
  powerpc/pseries: Rivet CONFIG_EEH for pSeries platform
  powerpc/irqdomain: Fix broken NR_IRQ references
  powerpc/8xx: Fix NR_IRQ bugs and refactor 8xx interrupt controller
parents 84e92ef4 810b4de2
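
The common thread in the irqdomain fixes below is dropping open-coded loops over a fixed NR_IRQS range (and the powerpc-private for_each_irq() macro) in favour of the generic for_each_irq_desc() iterator, which only visits allocated descriptors. A minimal sketch of that pattern, assuming a kernel build context; the function name example_mask_all_irqs is illustrative only and not part of this merge:

	#include <linux/irq.h>
	#include <linux/irqdesc.h>

	/* Illustrative sketch: walk every allocated irq descriptor instead of
	 * assuming the range 0..NR_IRQS-1 and calling irq_to_desc() by hand.
	 */
	static void example_mask_all_irqs(void)
	{
		unsigned int irq;
		struct irq_desc *desc;

		for_each_irq_desc(irq, desc) {
			struct irq_chip *chip = irq_desc_get_chip(desc);

			if (!chip)
				continue;
			if (chip->irq_disable)
				chip->irq_disable(&desc->irq_data);
			else if (chip->irq_mask)
				chip->irq_mask(&desc->irq_data);
		}
	}
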
@@ -18,10 +18,6 @@
 #include <linux/atomic.h>

-/* Define a way to iterate across irqs. */
-#define for_each_irq(i) \
-	for ((i) = 0; (i) < NR_IRQS; ++(i))
-
 extern atomic_t ppc_n_lost_interrupts;

 /* This number is used when no interrupt has been assigned */
......
@@ -330,14 +330,10 @@ void migrate_irqs(void)
 	alloc_cpumask_var(&mask, GFP_KERNEL);

-	for_each_irq(irq) {
+	for_each_irq_desc(irq, desc) {
 		struct irq_data *data;
 		struct irq_chip *chip;

-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-
 		data = irq_desc_get_irq_data(desc);
 		if (irqd_is_per_cpu(data))
 			continue;
......
@@ -23,14 +23,11 @@
 void machine_kexec_mask_interrupts(void) {
 	unsigned int i;
+	struct irq_desc *desc;

-	for_each_irq(i) {
-		struct irq_desc *desc = irq_to_desc(i);
+	for_each_irq_desc(i, desc) {
 		struct irq_chip *chip;

-		if (!desc)
-			continue;
-
 		chip = irq_desc_get_chip(desc);
 		if (!chip)
 			continue;
......
@@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		pr_devel("axon_msi: woff %x roff %x msi %x\n",
 			  write_offset, msic->read_offset, msi);

-		if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
+		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
 			generic_handle_irq(msi);
 			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
 		} else {
@@ -276,9 +276,6 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 	if (rc)
 		return rc;

-	/* We rely on being able to stash a virq in a u16 */
-	BUILD_BUG_ON(NR_IRQS > 65536);
-
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		virq = irq_create_direct_mapping(msic->irq_domain);
 		if (virq == NO_IRQ) {
@@ -392,7 +389,8 @@ static int axon_msi_probe(struct platform_device *device)
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

-	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
+	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
+	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);
......
@@ -248,6 +248,6 @@ void beatic_deinit_IRQ(void)
 {
 	int i;

-	for (i = 1; i < NR_IRQS; i++)
+	for (i = 1; i < nr_irqs; i++)
 		beat_destruct_irq_plug(i);
 }
@@ -57,9 +57,9 @@ static int max_real_irqs;

 static DEFINE_RAW_SPINLOCK(pmac_pic_lock);

-#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
-static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+/* The max irq number this driver deals with is 128; see max_irqs */
+static DECLARE_BITMAP(ppc_lost_interrupts, 128);
+static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
 static int pmac_irq_cascade = -1;
 static struct irq_domain *pmac_pic_host;
......
@@ -30,9 +30,9 @@ config PPC_SPLPAR
 	  two or more partitions.

 config EEH
-	bool "PCI Extended Error Handling (EEH)" if EXPERT
+	bool
 	depends on PPC_PSERIES && PCI
-	default y if !EXPERT
+	default y

 config PSERIES_MSI
 	bool
......
@@ -51,8 +51,7 @@
 static intctl_cpm2_t __iomem *cpm2_intctl;

 static struct irq_domain *cpm2_pic_host;
-#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */

 static const u_char irq_to_siureg[] = {
 	1, 1, 1, 1, 1, 1, 1, 1,
......
@@ -18,69 +18,45 @@
 extern int cpm_get_irq(struct pt_regs *regs);

 static struct irq_domain *mpc8xx_pic_host;
-#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long mpc8xx_cached_irq_mask;
 static sysconf8xx_t __iomem *siu_reg;

-int cpm_get_irq(struct pt_regs *regs);
+static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d)
+{
+	return 0x80000000 >> irqd_to_hwirq(d);
+}

 static void mpc8xx_unmask_irq(struct irq_data *d)
 {
-	int bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }

 static void mpc8xx_mask_irq(struct irq_data *d)
 {
-	int bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }

 static void mpc8xx_ack(struct irq_data *d)
 {
-	int bit;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
+	out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d));
 }

 static void mpc8xx_end_irq(struct irq_data *d)
 {
-	int bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }

 static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-	if (flow_type & IRQ_TYPE_EDGE_FALLING) {
-		irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);
+	/* only external IRQ senses are programmable */
+	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !(irqd_to_hwirq(d) & 1)) {
 		unsigned int siel = in_be32(&siu_reg->sc_siel);
-
-		/* only external IRQ senses are programmable */
-		if ((hw & 1) == 0) {
-			siel |= (0x80000000 >> hw);
-			out_be32(&siu_reg->sc_siel, siel);
-			__irq_set_handler_locked(d->irq, handle_edge_irq);
-		}
+		siel |= mpc8xx_irqd_to_bit(d);
+		out_be32(&siu_reg->sc_siel, siel);
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
 	}
 	return 0;
 }
@@ -132,6 +108,9 @@ static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
 		IRQ_TYPE_EDGE_FALLING,
 	};

+	if (intspec[0] > 0x1f)
+		return 0;
+
 	*out_hwirq = intspec[0];
 	if (intsize > 1 && intspec[1] < 4)
 		*out_flags = map_pic_senses[intspec[1]];
......
@@ -188,6 +188,7 @@ void xics_migrate_irqs_away(void)
 {
 	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
 	unsigned int irq, virq;
+	struct irq_desc *desc;

 	/* If we used to be the default server, move to the new "boot_cpuid" */
 	if (hw_cpu == xics_default_server)
@@ -202,8 +203,7 @@ void xics_migrate_irqs_away(void)
 	/* Allow IPIs again... */
 	icp_ops->set_priority(DEFAULT_PRIORITY);

-	for_each_irq(virq) {
-		struct irq_desc *desc;
+	for_each_irq_desc(virq, desc) {
 		struct irq_chip *chip;
 		long server;
 		unsigned long flags;
@@ -212,9 +212,8 @@ void xics_migrate_irqs_away(void)
 		/* We can't set affinity on ISA interrupts */
 		if (virq < NUM_ISA_INTERRUPTS)
 			continue;
-		desc = irq_to_desc(virq);
 		/* We only need to migrate enabled IRQS */
-		if (!desc || !desc->action)
+		if (!desc->action)
 			continue;
 		if (desc->irq_data.domain != xics_host)
 			continue;
......
@@ -469,7 +469,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
 	tty = NULL;
 	if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
 		if (!ZS_IS_OPEN(uap_a)) {
-			pmz_debug("ChanA interrupt while open !\n");
+			pmz_debug("ChanA interrupt while not open !\n");
 			goto skip_a;
 		}
 		write_zsreg(uap_a, R0, RES_H_IUS);
@@ -493,8 +493,8 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
 	spin_lock(&uap_b->port.lock);
 	tty = NULL;
 	if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
-		if (!ZS_IS_OPEN(uap_a)) {
-			pmz_debug("ChanB interrupt while open !\n");
+		if (!ZS_IS_OPEN(uap_b)) {
+			pmz_debug("ChanB interrupt while not open !\n");
 			goto skip_b;
 		}
 		write_zsreg(uap_b, R0, RES_H_IUS);
......