Commit a87af778 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 bugfixes from Martin Schwidefsky:
 "A collection a bug fixes.  Most of them are minor but two of them are
  more severe.  The linkage stack bug can be used by user space to force
  an oops, with panic_on_oops this is a denial-of-service.  And the dump
  memory detection issue can cause incomplete memory dumps"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/cio: improve cio_commit_config
  s390: fix kernel crash due to linkage stack instructions
  s390/dump: Fix dump memory detection
  s390/appldata: restore missing init_virt_timer()
  s390/qdio: correct program-controlled interruption checking
  s390/qdio: for_each macro correctness
parents 16e5a2ed 1bc8927c
arch/s390/appldata/appldata_base.c

@@ -529,6 +529,7 @@ static int __init appldata_init(void)
 {
     int rc;
 
+    init_virt_timer(&appldata_timer);
     appldata_timer.function = appldata_timer_function;
     appldata_timer.data = (unsigned long) &appldata_work;
...
arch/s390/kernel/head64.S

@@ -59,7 +59,7 @@ ENTRY(startup_continue)
     .quad 0                 # cr12: tracing off
     .quad 0                 # cr13: home space segment table
     .quad 0xc0000000        # cr14: machine check handling off
-    .quad 0                 # cr15: linkage stack operations
+    .quad .Llinkage_stack   # cr15: linkage stack operations
 .Lpcmsk:.quad 0x0000000180000000
 .L4malign:.quad 0xffffffffffc00000
 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@@ -67,12 +67,15 @@ ENTRY(startup_continue)
 .Lparmaddr:
     .quad PARMAREA
     .align 64
-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
     .long 0,0,0,0,0,0,0,0
+.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
     .align 128
 .Lduald:.rept 8
     .long 0x80000000,0,0,0 # invalid access-list entries
     .endr
+.Llinkage_stack:
+    .long 0,0,0x89000000,0,0,0,0x8a000000,0
 ENTRY(_ehead)
...
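The "linkage stack bug" called out in the pull message is the user-space-visible side of the head64.S change above. A minimal sketch of the kind of trigger meant, assuming an s390x gcc toolchain (this program is an illustration, not part of the commit): it issues the linkage-stack instruction BAKR via the .insn directive. On a kernel without the fix such an instruction could oops the kernel, which with panic_on_oops amounts to a denial of service; on a fixed kernel the expectation is that the process either continues or at worst receives a fatal signal.

    /* Hypothetical illustration, not from the commit: issue a
     * linkage-stack instruction (BAKR, opcode 0xb240) from user space
     * on s390x.  The .insn directive encodes it directly, so no special
     * assembler support for the mnemonic is needed. */
    #include <stdio.h>

    int main(void)
    {
        /* bakr 0,0 - form a linkage stack entry, take no branch */
        asm volatile(".insn rre,0xb2400000,0,0" : : : "memory");
        printf("survived the linkage stack instruction\n");
        return 0;
    }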
arch/s390/mm/page-states.c

@@ -12,6 +12,8 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/ipl.h>
 
 #define ESSA_SET_STABLE 1
 #define ESSA_SET_UNUSED 2
@@ -41,6 +43,14 @@ void __init cmma_init(void)
 
     if (!cmma_flag)
         return;
+    /*
+     * Disable CMM for dump, otherwise the tprot based memory
+     * detection can fail because of unstable pages.
+     */
+    if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
+        cmma_flag = 0;
+        return;
+    }
     asm volatile(
         " .insn rrf,0xb9ab0000,%1,%1,0,0\n"
         "0: la %0,0\n"
...
drivers/s390/cio/cio.c

@@ -342,8 +342,9 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib)
  */
 int cio_commit_config(struct subchannel *sch)
 {
-    struct schib schib;
     int ccode, retry, ret = 0;
+    struct schib schib;
+    struct irb irb;
 
     if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
         return -ENODEV;
@@ -367,7 +368,10 @@ int cio_commit_config(struct subchannel *sch)
             ret = -EAGAIN;
             break;
         case 1: /* status pending */
-            return -EBUSY;
+            ret = -EBUSY;
+            if (tsch(sch->schid, &irb))
+                return ret;
+            break;
         case 2: /* busy */
             udelay(100); /* allow for recovery */
             ret = -EBUSY;
@@ -403,7 +407,6 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
  */
 int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
 {
-    int retry;
     int ret;
 
     CIO_TRACE_EVENT(2, "ensch");
@@ -418,7 +421,6 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
     sch->config.isc = sch->isc;
     sch->config.intparm = intparm;
-    for (retry = 0; retry < 3; retry++) {
     ret = cio_commit_config(sch);
     if (ret == -EIO) {
         /*
@@ -426,12 +428,7 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
          * the concurrent sense bit the next time.
          */
         sch->config.csense = 0;
-    } else if (ret == -EBUSY) {
-        struct irb irb;
-        if (tsch(sch->schid, &irb) != 0)
-            break;
-    } else
-        break;
+        ret = cio_commit_config(sch);
     }
     CIO_HEX_EVENT(2, &ret, sizeof(ret));
     return ret;
@@ -444,7 +441,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
  */
 int cio_disable_subchannel(struct subchannel *sch)
 {
-    int retry;
     int ret;
 
     CIO_TRACE_EVENT(2, "dissch");
@@ -456,16 +452,8 @@ int cio_disable_subchannel(struct subchannel *sch)
         return -ENODEV;
 
     sch->config.ena = 0;
-    for (retry = 0; retry < 3; retry++) {
     ret = cio_commit_config(sch);
-    if (ret == -EBUSY) {
-        struct irb irb;
-        if (tsch(sch->schid, &irb) != 0)
-            break;
-    } else
-        break;
-    }
     CIO_HEX_EVENT(2, &ret, sizeof(ret));
     return ret;
 }
...
drivers/s390/cio/qdio.h

@@ -360,13 +360,11 @@ static inline int multicast_outbound(struct qdio_q *q)
     (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
 
 #define for_each_input_queue(irq_ptr, q, i) \
-    for (i = 0, q = irq_ptr->input_qs[0]; \
-         i < irq_ptr->nr_input_qs; \
-         q = irq_ptr->input_qs[++i])
+    for (i = 0; i < irq_ptr->nr_input_qs && \
+        ({ q = irq_ptr->input_qs[i]; 1; }); i++)
 
 #define for_each_output_queue(irq_ptr, q, i) \
-    for (i = 0, q = irq_ptr->output_qs[0]; \
-         i < irq_ptr->nr_output_qs; \
-         q = irq_ptr->output_qs[++i])
+    for (i = 0; i < irq_ptr->nr_output_qs && \
+        ({ q = irq_ptr->output_qs[i]; 1; }); i++)
 
 #define prev_buf(bufnr) \
     ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
...
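The correctness issue in the old macros is easiest to see in isolation: the increment clause evaluates q = irq_ptr->input_qs[++i] before the bounds check of the next iteration, so the final pass reads one slot past the last valid queue (and the initializer dereferences input_qs[0] even when there are no queues). Below is a small stand-alone sketch with made-up names, not the qdio code, contrasting the two patterns; like the kernel it relies on GCC's statement-expression extension.

    /* Stand-alone illustration of the for_each fix; hypothetical names,
     * not the qdio code.  ({ ... }) is a GNU C statement expression. */
    #include <stdio.h>

    #define NR_QS 2

    /* Old shape: qs[++i] is read in the increment clause before the
     * next bounds check, touching qs[nr] on the last iteration. */
    #define old_for_each(qs, nr, q, i) \
        for (i = 0, q = (qs)[0]; i < (nr); q = (qs)[++i])

    /* New shape: the bound is checked first; q is only assigned for a
     * valid index i, and the statement expression always yields true. */
    #define new_for_each(qs, nr, q, i) \
        for (i = 0; i < (nr) && ({ q = (qs)[i]; 1; }); i++)

    int main(void)
    {
        const char *qs[NR_QS] = { "q0", "q1" };
        const char *q;
        int i;

        new_for_each(qs, NR_QS, q, i)
            printf("%d: %s\n", i, q);

        /* old_for_each(qs, NR_QS, q, i) would also read qs[2] - one
         * element past the array - before terminating. */
        return 0;
    }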
drivers/s390/cio/qdio_main.c

@@ -996,7 +996,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
         }
     }
 
-    if (!pci_out_supported(q))
+    if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
         return;
 
     for_each_output_queue(irq_ptr, q, i) {
...