Message ID | 28a0b0682a0130c4d4cc39248ed7b476608bdea7.1533830180.git.joseph.salisbury@canonical.com |
---|---|
State | New |
Headers | show |
Series | s390/qeth: don't clobber buffer on async TX completion | expand |
On 08/14/18 21:15, Joseph Salisbury wrote: > From: Julian Wiedmann <jwi@linux.ibm.com> > > BugLink: https://bugs.launchpad.net/bugs/1786057 > > If qeth_qdio_output_handler() detects that a transmit requires async > completion, it replaces the pending buffer's metadata object > (qeth_qdio_out_buffer) so that this queue buffer can be re-used while > the data is pending completion. > > Later when the CQ indicates async completion of such a metadata object, > qeth_qdio_cq_handler() tries to free any data associated with this > object (since HW has now completed the transfer). By calling > qeth_clear_output_buffer(), it erroneously operates on the queue buffer > that _previously_ belonged to this transfer ... but which has been > potentially re-used several times by now. > This results in double-free's of the buffer's data, and failing > transmits as the buffer descriptor is scrubbed in mid-air. > > The correct way of handling this situation is to > 1. scrub the queue buffer when it is prepared for re-use, and > 2. later obtain the data addresses from the async-completion notifier > (ie. the AOB), instead of the queue buffer. > > All this only affects qeth devices used for af_iucv HiperTransport. > > Fixes: 0da9581ddb0f ("qeth: exploit asynchronous delivery of storage blocks") > Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com> > Signed-off-by: David S. 
Miller <davem@davemloft.net> > (backported from commit ce28867fd20c23cd769e78b4d619c4755bf71a1c) > Signed-off-by: Joseph Salisbury <joseph.salisbury@canonical.com> Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com> > --- > drivers/s390/net/qeth_core.h | 11 +++++++++++ > drivers/s390/net/qeth_core_main.c | 22 ++++++++++++++++------ > 2 files changed, 27 insertions(+), 6 deletions(-) > > diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h > index 50030cd..9c86ca2 100644 > --- a/drivers/s390/net/qeth_core.h > +++ b/drivers/s390/net/qeth_core.h > @@ -850,6 +850,17 @@ struct qeth_trap_id { > /*some helper functions*/ > #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") > > +static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, > + unsigned int elements) > +{ > + unsigned int i; > + > + for (i = 0; i < elements; i++) > + memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element)); > + buf->element[14].sflags = 0; > + buf->element[15].sflags = 0; > +} > + > static inline int qeth_get_micros(void) > { > return (int) (get_tod_clock() >> 12); > diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c > index cf2efc0..40284b4 100644 > --- a/drivers/s390/net/qeth_core_main.c > +++ b/drivers/s390/net/qeth_core_main.c > @@ -65,9 +65,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, > struct qeth_qdio_out_buffer *buf, > enum iucv_tx_notify notification); > static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); > -static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, > - struct qeth_qdio_out_buffer *buf, > - enum qeth_qdio_buffer_states newbufstate); > static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); > > struct workqueue_struct *qeth_wq; > @@ -478,6 +475,7 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card, > struct qaob *aob; > struct qeth_qdio_out_buffer *buffer; > enum iucv_tx_notify notification; > + unsigned 
int i; > > aob = (struct qaob *) phys_to_virt(phys_aob_addr); > QETH_CARD_TEXT(card, 5, "haob"); > @@ -502,10 +500,18 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card, > qeth_notify_skbs(buffer->q, buffer, notification); > > buffer->aob = NULL; > - qeth_clear_output_buffer(buffer->q, buffer, > - QETH_QDIO_BUF_HANDLED_DELAYED); > + /* Free dangling allocations. The attached skbs are handled by > + * qeth_cleanup_handled_pending(). > + */ > + for (i = 0; > + i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); > + i++) { > + if (aob->sba[i] && buffer->is_header[i]) > + kmem_cache_free(qeth_core_header_cache, > + (void *) aob->sba[i]); > + } > + atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); > > - /* from here on: do not touch buffer anymore */ > qdio_release_aob(aob); > } > > @@ -3743,6 +3749,10 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, > QETH_CARD_TEXT(queue->card, 5, "aob"); > QETH_CARD_TEXT_(queue->card, 5, "%lx", > virt_to_phys(buffer->aob)); > + > + /* prepare the queue slot for re-use: */ > + qeth_scrub_qdio_buffer(buffer->buffer, > + QETH_MAX_BUFFER_ELEMENTS(card)); > if (qeth_init_qdio_out_buf(queue, bidx)) { > QETH_CARD_TEXT(card, 2, "outofbuf"); > qeth_schedule_recovery(card); >
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 50030cd..9c86ca2 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -850,6 +850,17 @@ struct qeth_trap_id { /*some helper functions*/ #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") +static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, + unsigned int elements) +{ + unsigned int i; + + for (i = 0; i < elements; i++) + memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element)); + buf->element[14].sflags = 0; + buf->element[15].sflags = 0; +} + static inline int qeth_get_micros(void) { return (int) (get_tod_clock() >> 12); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index cf2efc0..40284b4 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -65,9 +65,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification); static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); -static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, - enum qeth_qdio_buffer_states newbufstate); static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); struct workqueue_struct *qeth_wq; @@ -478,6 +475,7 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob; struct qeth_qdio_out_buffer *buffer; enum iucv_tx_notify notification; + unsigned int i; aob = (struct qaob *) phys_to_virt(phys_aob_addr); QETH_CARD_TEXT(card, 5, "haob"); @@ -502,10 +500,18 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card, qeth_notify_skbs(buffer->q, buffer, notification); buffer->aob = NULL; - qeth_clear_output_buffer(buffer->q, buffer, - QETH_QDIO_BUF_HANDLED_DELAYED); + /* Free dangling allocations. The attached skbs are handled by + * qeth_cleanup_handled_pending(). 
+ */ + for (i = 0; + i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); + i++) { + if (aob->sba[i] && buffer->is_header[i]) + kmem_cache_free(qeth_core_header_cache, + (void *) aob->sba[i]); + } + atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); - /* from here on: do not touch buffer anymore */ qdio_release_aob(aob); } @@ -3743,6 +3749,10 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, QETH_CARD_TEXT(queue->card, 5, "aob"); QETH_CARD_TEXT_(queue->card, 5, "%lx", virt_to_phys(buffer->aob)); + + /* prepare the queue slot for re-use: */ + qeth_scrub_qdio_buffer(buffer->buffer, + QETH_MAX_BUFFER_ELEMENTS(card)); if (qeth_init_qdio_out_buf(queue, bidx)) { QETH_CARD_TEXT(card, 2, "outofbuf"); qeth_schedule_recovery(card);