Commit 4f325184 authored by Jan Glauber, committed by Martin Schwidefsky

[S390] qdio: prevent race for shared indicators

If the shared indicator is used, the following race leads to
an inbound stall:

Device                  CPU0                    CPU1
========================================================

non-shared DSCI => 1
ALSI => 1
                        Thin INT
                        ALSI => 0

                        non-shared DSCI => 0
                        tasklets scheduled

shared DSCI => 1
ALSI => 1

                        shared DSCI => 0
                        ALSI ? -> set
                                                Thin INT
                                                ALSI => 0
                        ALSI was set,
                        shared DSCI => 1

After that no more interrupts occur because the DSCI is still set.
Fix that race by resetting the shared DSCI only if it was actually
set, so that the tasklets for all shared devices are scheduled and
will run after the interrupt.
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 078f8eca
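
For illustration, a minimal user-space sketch of the race-free pattern follows. This is not the kernel code: shared_dsci and thinint_handler are made-up names standing in for q_indicators[TIQDIO_SHARED_IND].ind and tiqdio_thinint_handler(). The idea is that the handler snapshots the shared indicator once at entry and clears it at the end only if that snapshot saw it set, so a bit set by a newer interrupt is never wiped out.

/*
 * Illustrative only: a user-space model of the race-free pattern.
 * shared_dsci stands in for q_indicators[TIQDIO_SHARED_IND].ind.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint shared_dsci;   /* shared device state change indicator */

static void thinint_handler(void)
{
        /* Snapshot the indicator once, at handler entry. */
        unsigned int si_used = atomic_load(&shared_dsci);

        /* ... scan queues; schedule tasklets for all devices on the
           shared indicator when si_used is non-zero ... */

        /*
         * Clear the indicator only if this invocation saw it set on
         * entry. An invocation that did not observe it leaves it
         * alone, so a bit set by a newer interrupt cannot be lost.
         */
        if (si_used && atomic_load(&shared_dsci))
                atomic_exchange(&shared_dsci, 0);
}

int main(void)
{
        atomic_store(&shared_dsci, 1);   /* device raises the indicator */
        thinint_handler();
        printf("shared DSCI after handler: %u\n",
               (unsigned int)atomic_load(&shared_dsci));
        return 0;
}

The patch below applies the same idea: si_used is captured at the top of tiqdio_thinint_handler() and the shared DSCI is cleared at the end only when si_used is non-zero.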
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -423,9 +423,9 @@ struct indicator_t {
 
 extern struct indicator_t *q_indicators;
 
-static inline int shared_ind(struct qdio_irq *irq_ptr)
+static inline int shared_ind(u32 *dsci)
 {
-	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+	return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
 }
 
 /* prototypes for thin interrupt */
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1552,7 +1552,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
 
 	WARN_ON(queue_irqs_enabled(q));
 
-	if (!shared_ind(q->irq_ptr))
+	if (!shared_ind(q->irq_ptr->dsci))
 		xchg(q->irq_ptr->dsci, 0);
 
 	qdio_stop_polling(q);
@@ -1562,7 +1562,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
 	 * We need to check again to not lose initiative after
 	 * resetting the ACK state.
 	 */
-	if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci)
+	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
 		goto rescan;
 	if (!qdio_inbound_q_done(q))
 		goto rescan;
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -36,22 +36,8 @@ static u8 *tiqdio_alsi;
 
 struct indicator_t *q_indicators;
 
-static int css_qdio_omit_svs;
-
 static u64 last_ai_time;
 
-static inline unsigned long do_clear_global_summary(void)
-{
-	register unsigned long __fn asm("1") = 3;
-	register unsigned long __tmp asm("2");
-	register unsigned long __time asm("3");
-
-	asm volatile(
-		"	.insn	rre,0xb2650000,2,0"
-		: "+d" (__fn), "=d" (__tmp), "=d" (__time));
-	return __time;
-}
-
 /* returns addr for the device state change indicator */
 static u32 *get_indicator(void)
 {
@@ -84,10 +70,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
 	struct qdio_q *q;
 	int i;
 
-	/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
-	if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
-		css_qdio_omit_svs = 1;
-
 	mutex_lock(&tiq_list_lock);
 	for_each_input_queue(irq_ptr, q, i)
 		list_add_rcu(&q->entry, &tiq_list);
@@ -113,9 +95,9 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
 	}
 }
 
-static inline int shared_ind_used(void)
+static inline u32 shared_ind_set(void)
 {
-	return atomic_read(&q_indicators[TIQDIO_SHARED_IND].count);
+	return q_indicators[TIQDIO_SHARED_IND].ind;
 }
 
 /**
@@ -125,22 +107,12 @@ static inline int shared_ind_used(void)
  */
 static void tiqdio_thinint_handler(void *alsi, void *data)
 {
+	u32 si_used = shared_ind_set();
 	struct qdio_q *q;
 
 	last_ai_time = S390_lowcore.int_clock;
 	kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
 
-	/*
-	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
-	 * avoidance (SVS clears adapter interrupt suppression overwrite).
-	 */
-	if (!css_qdio_omit_svs)
-		do_clear_global_summary();
-
-	/* reset local summary indicator */
-	if (shared_ind_used())
-		xchg(tiqdio_alsi, 0);
-
 	/* protect tiq_list entries, only changed in activate or shutdown */
 	rcu_read_lock();
 
@@ -148,7 +120,10 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
 	list_for_each_entry_rcu(q, &tiq_list, entry) {
 
 		/* only process queues from changed sets */
-		if (!*q->irq_ptr->dsci)
+		if (unlikely(shared_ind(q->irq_ptr->dsci))) {
+			if (!si_used)
+				continue;
+		} else if (!*q->irq_ptr->dsci)
 			continue;
 
 		if (q->u.in.queue_start_poll) {
@@ -164,7 +139,7 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
 					 q->irq_ptr->int_parm);
 		} else {
 			/* only clear it if the indicator is non-shared */
-			if (!shared_ind(q->irq_ptr))
+			if (!shared_ind(q->irq_ptr->dsci))
 				xchg(q->irq_ptr->dsci, 0);
 			/*
 			 * Call inbound processing but not directly
@@ -180,13 +155,8 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
 	 * If the shared indicator was used clear it now after all queues
 	 * were processed.
	 */
-	if (shared_ind_used()) {
-		xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
-
-		/* prevent racing */
-		if (*tiqdio_alsi)
-			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
-	}
+	if (si_used && shared_ind_set())
+		xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
 }
 
 static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
@@ -271,12 +241,6 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr)
 {
 	if (!is_thinint_irq(irq_ptr))
 		return 0;
-
-	/* Check for aif time delay disablement. If installed,
-	 * omit SVS even under LPAR
-	 */
-	if (css_general_characteristics.aif_tdd)
-		css_qdio_omit_svs = 1;
-
 	return set_subchannel_ind(irq_ptr, 0);
 }