Commit ce28867f authored by Julian Wiedmann, committed by David S. Miller

s390/qeth: don't clobber buffer on async TX completion

If qeth_qdio_output_handler() detects that a transmit requires async
completion, it replaces the pending buffer's metadata object
(qeth_qdio_out_buffer) so that this queue buffer can be re-used while
the data is pending completion.

Later when the CQ indicates async completion of such a metadata object,
qeth_qdio_cq_handler() tries to free any data associated with this
object (since HW has now completed the transfer). By calling
qeth_clear_output_buffer(), it erroneously operates on the queue buffer
that _previously_ belonged to this transfer ... but which has been
potentially re-used several times by now.
This results in double-frees of the buffer's data, and failing
transmits as the buffer descriptor is scrubbed in mid-air.
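
For illustration, a minimal userspace model of the hazard (hypothetical,
simplified types; not the driver's real structures): once the slot has been
recycled, resolving the payload through the slot no longer reaches the data
that the stale completion actually owns.

/* Minimal userspace model of the hazard (hypothetical, simplified types;
 * not the driver's real structures).
 */
#include <assert.h>
#include <stdlib.h>

struct slot {
	void *data;	/* payload of whichever transmit currently owns the slot */
};

int main(void)
{
	struct slot s = { .data = malloc(64) };	/* transmit #1, completes asynchronously */
	void *tx1_data = s.data;		/* what #1 really needs freed on completion */

	s.data = malloc(64);			/* slot is re-used for transmit #2 */

	/* Old behaviour on #1's async completion: free s.data. But s.data is
	 * #2's payload by now, so the in-flight frame gets scrubbed and #2's
	 * own completion frees the same pointer a second time.
	 */
	assert(s.data != tx1_data);

	free(tx1_data);				/* what should have been freed */
	free(s.data);
	return 0;
}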

The correct way of handling this situation is to
1. scrub the queue buffer when it is prepared for re-use, and
2. later obtain the data addresses from the async-completion notifier
   (i.e. the AOB), instead of the queue buffer.
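
A minimal userspace sketch of these two steps, assuming hypothetical
simplified types rather than the driver's real structures or the qeth API:

/* Minimal userspace sketch of the two steps above (hypothetical, simplified
 * types; not the driver's real structures or the qeth API).
 */
#include <stdlib.h>
#include <string.h>

#define ELEMENTS 2

struct slot {				/* one TX queue buffer */
	void *element[ELEMENTS];	/* payload addresses */
};

struct notifier {			/* stands in for the AOB */
	void *sba[ELEMENTS];		/* addresses captured at submit time */
};

/* step 1: scrub the slot so the next transmit can use it immediately */
static void scrub_slot(struct slot *s)
{
	memset(s->element, 0, sizeof(s->element));
}

/* step 2: on async completion, free via the notifier, never via the slot */
static void complete_async(struct notifier *n)
{
	for (int i = 0; i < ELEMENTS; i++)
		free(n->sba[i]);
}

int main(void)
{
	struct slot s = { .element = { malloc(8), malloc(8) } };
	struct notifier n;

	/* transmit goes async: the notifier remembers the payload addresses */
	memcpy(n.sba, s.element, sizeof(n.sba));
	scrub_slot(&s);			/* slot is now clean for re-use */

	s.element[0] = malloc(8);	/* next transmit re-uses the slot */
	complete_async(&n);		/* frees the old payload, not the new one */

	free(s.element[0]);
	return 0;
}

The point of the split is that the completion path only ever consults state
captured at submit time (the notifier), so the queue slot can be handed to
the next transmit immediately.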

All this only affects qeth devices used for af_iucv HiperTransport.

Fixes: 0da9581d ("qeth: exploit asynchronous delivery of storage blocks")
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9d0a58fb
@@ -829,6 +829,17 @@ struct qeth_trap_id {
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
+static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+					  unsigned int elements)
+{
+	unsigned int i;
+
+	for (i = 0; i < elements; i++)
+		memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
+	buf->element[14].sflags = 0;
+	buf->element[15].sflags = 0;
+}
+
 /**
  * qeth_get_elements_for_range() - find number of SBALEs to cover range.
  * @start: Start of the address range.
...
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
 		struct qeth_qdio_out_buffer *buf,
 		enum iucv_tx_notify notification);
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf,
-		enum qeth_qdio_buffer_states newbufstate);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 struct workqueue_struct *qeth_wq;
@@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
 	struct qaob *aob;
 	struct qeth_qdio_out_buffer *buffer;
 	enum iucv_tx_notify notification;
+	unsigned int i;
 
 	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
 	QETH_CARD_TEXT(card, 5, "haob");
@@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
 	qeth_notify_skbs(buffer->q, buffer, notification);
 
 	buffer->aob = NULL;
-	qeth_clear_output_buffer(buffer->q, buffer,
-				 QETH_QDIO_BUF_HANDLED_DELAYED);
+	/* Free dangling allocations. The attached skbs are handled by
+	 * qeth_cleanup_handled_pending().
+	 */
+	for (i = 0;
+	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+	     i++) {
+		if (aob->sba[i] && buffer->is_header[i])
+			kmem_cache_free(qeth_core_header_cache,
+					(void *) aob->sba[i]);
+	}
+	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
 
-	/* from here on: do not touch buffer anymore */
 	qdio_release_aob(aob);
 }
@@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 				QETH_CARD_TEXT(queue->card, 5, "aob");
 				QETH_CARD_TEXT_(queue->card, 5, "%lx",
 						virt_to_phys(buffer->aob));
+
+				/* prepare the queue slot for re-use: */
+				qeth_scrub_qdio_buffer(buffer->buffer,
+						       QETH_MAX_BUFFER_ELEMENTS(card));
 				if (qeth_init_qdio_out_buf(queue, bidx)) {
 					QETH_CARD_TEXT(card, 2, "outofbuf");
 					qeth_schedule_recovery(card);
...