[net-next] sfc: stop the TX queue before pushing new buffers

Message ID 152706844446.27257.6312747433904122379.stgit@mh-desktop.uk.solarflarecom.com
State Changes Requested, archived
Delegated to: David Miller
Series [net-next] sfc: stop the TX queue before pushing new buffers

Commit Message

Martin Habets May 23, 2018, 9:41 a.m. UTC
efx_enqueue_skb() can push new buffers for the xmit_more functionality.
We must stop the TX queue before this, or else the TX queue does not get
restarted and we get a netdev watchdog timeout.

In the error handling we may now need to unwind more than one packet, and
we may need to push the buffers pending on the partner queue.

Fixes: e9117e5099ea ("sfc: Firmware-Assisted TSO version 2")
Reported-by: Jarod Wilson <jarod@redhat.com>
Signed-off-by: Martin Habets <mhabets@solarflare.com>
---

Dave, could you please also queue up this patch for stable?

 drivers/net/ethernet/sfc/tx.c |   31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)
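
For reference, a minimal user-space model of the ordering problem described in
the commit message. All toy_* names, the stop threshold, and the main() driver
are illustrative stand-ins, not the real sfc API; the point is only that the
queue must be marked stopped before the doorbell push, so that the completion
path finds a stopped queue to restart.

#include <stdbool.h>
#include <stdio.h>

struct toy_txq {
	unsigned int insert_count;	/* descriptors queued by the driver */
	unsigned int write_count;	/* descriptors pushed to "hardware" */
	bool stopped;			/* models netif_tx_stop_queue() */
	bool xmit_more_available;	/* un-pushed work awaiting a later kick */
};

static void toy_maybe_stop_queue(struct toy_txq *q)
{
	/* Arbitrary toy threshold: stop once two descriptors are outstanding. */
	if (q->insert_count - q->write_count >= 2)
		q->stopped = true;
}

static void toy_push_buffers(struct toy_txq *q)
{
	q->write_count = q->insert_count;	/* ring the doorbell */
	q->xmit_more_available = false;
}

/* Order used by the patch: queue the packet, maybe stop, then push.
 * If the push came first, a completion could be processed while the
 * queue is not yet stopped, and nothing would ever restart it.
 */
static void toy_enqueue_skb(struct toy_txq *q, bool xmit_more)
{
	q->insert_count++;		/* map and queue one packet */
	toy_maybe_stop_queue(q);
	if (!xmit_more || q->stopped)
		toy_push_buffers(q);
	else
		q->xmit_more_available = true;
}

int main(void)
{
	struct toy_txq q = { 0 };

	toy_enqueue_skb(&q, true);	/* xmit_more set: held back */
	toy_enqueue_skb(&q, false);	/* last skb: stop fires, then push */
	printf("stopped=%d pushed=%u\n", q.stopped, q.write_count);
	return 0;
}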

Comments

Edward Cree May 23, 2018, 3:28 p.m. UTC | #1
On 23/05/18 10:41, Martin Habets wrote:
> efx_enqueue_skb() can push new buffers for the xmit_more functionality.
> We must stop the TX queue before this, or else the TX queue does not get
> restarted and we get a netdev watchdog timeout.
>
> In the error handling we may now need to unwind more than one packet, and
> we may need to push the buffers pending on the partner queue.
>
> Fixes: e9117e5099ea ("sfc: Firmware-Assisted TSO version 2")
> Reported-by: Jarod Wilson <jarod@redhat.com>
> Signed-off-by: Martin Habets <mhabets@solarflare.com>
> ---
>
> Dave, could you please also queue up this patch for stable?
>
>  drivers/net/ethernet/sfc/tx.c |   31 +++++++++++++++++++++++--------
>  1 file changed, 23 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
> index cece961f2e82..17e0697f42d5 100644
> --- a/drivers/net/ethernet/sfc/tx.c
> +++ b/drivers/net/ethernet/sfc/tx.c
> @@ -435,17 +435,18 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
>  	} while (1);
>  }
>  
> -/* Remove buffers put into a tx_queue.  None of the buffers must have
> - * an skb attached.
> +/* Remove buffers put into a tx_queue for the current packet.
> + * None of the buffers must have an skb attached.
>   */
> -static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
> +static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
> +			       unsigned int insert_count)
>  {
>  	struct efx_tx_buffer *buffer;
>  	unsigned int bytes_compl = 0;
>  	unsigned int pkts_compl = 0;
>  
>  	/* Work backwards until we hit the original insert pointer value */
> -	while (tx_queue->insert_count != tx_queue->write_count) {
> +	while (tx_queue->insert_count != insert_count) {
>  		--tx_queue->insert_count;
>  		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
>  		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
> @@ -504,6 +505,8 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
>   */
>  netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
>  {
> +	unsigned int old_insert_count = tx_queue->insert_count;
> +	bool xmit_more = skb->xmit_more;
>  	bool data_mapped = false;
>  	unsigned int segments;
>  	unsigned int skb_len;
> @@ -553,8 +556,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
>  	/* Update BQL */
>  	netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
>  
> +	efx_tx_maybe_stop_queue(tx_queue);
> +
>  	/* Pass off to hardware */
> -	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
> +	if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
>  		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
>  
>  		/* There could be packets left on the partner queue if those
> @@ -577,14 +582,24 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
>  		tx_queue->tx_packets++;
>  	}
>  
> -	efx_tx_maybe_stop_queue(tx_queue);
> -
>  	return NETDEV_TX_OK;
>  
>  
>  err:
> -	efx_enqueue_unwind(tx_queue);
> +	efx_enqueue_unwind(tx_queue, old_insert_count);
>  	dev_kfree_skb_any(skb);
> +
> +	/* If we're not expecting another transmit and we had something to push
> +	 * on this queue or a partner queue then we need to push here to get the
> +	 * previous packets out.
> +	 */
> +	if (!xmit_more) {
> +		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
> +
> +		if (txq2->xmit_more_available)
> +			efx_nic_push_buffers(txq2);
> +	}
This only pushes the partner queue; it also needs to push this queue if
tx_queue->xmit_more_available is set (as your comment just above mentions).

Apart from this, LGTM.

-Ed
> +
>  	return NETDEV_TX_OK;
>  }
>  
>
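
To make the review point concrete: on top of this patch, the err path would
also need to kick the queue the failed skb was on, not just its partner. A
rough, untested sketch of that extra change, reusing only helpers already
present in the patch:

 	if (!xmit_more) {
 		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
 
+		if (tx_queue->xmit_more_available)
+			efx_nic_push_buffers(tx_queue);
 		if (txq2->xmit_more_available)
 			efx_nic_push_buffers(txq2);
 	}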

Patch

diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index cece961f2e82..17e0697f42d5 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -435,17 +435,18 @@  static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
 	} while (1);
 }
 
-/* Remove buffers put into a tx_queue.  None of the buffers must have
- * an skb attached.
+/* Remove buffers put into a tx_queue for the current packet.
+ * None of the buffers must have an skb attached.
  */
-static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
+static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+			       unsigned int insert_count)
 {
 	struct efx_tx_buffer *buffer;
 	unsigned int bytes_compl = 0;
 	unsigned int pkts_compl = 0;
 
 	/* Work backwards until we hit the original insert pointer value */
-	while (tx_queue->insert_count != tx_queue->write_count) {
+	while (tx_queue->insert_count != insert_count) {
 		--tx_queue->insert_count;
 		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
@@ -504,6 +505,8 @@  static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
  */
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
+	unsigned int old_insert_count = tx_queue->insert_count;
+	bool xmit_more = skb->xmit_more;
 	bool data_mapped = false;
 	unsigned int segments;
 	unsigned int skb_len;
@@ -553,8 +556,10 @@  netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Update BQL */
 	netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
 
+	efx_tx_maybe_stop_queue(tx_queue);
+
 	/* Pass off to hardware */
-	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+	if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
 		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
 
 		/* There could be packets left on the partner queue if those
@@ -577,14 +582,24 @@  netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 		tx_queue->tx_packets++;
 	}
 
-	efx_tx_maybe_stop_queue(tx_queue);
-
 	return NETDEV_TX_OK;
 
 
 err:
-	efx_enqueue_unwind(tx_queue);
+	efx_enqueue_unwind(tx_queue, old_insert_count);
 	dev_kfree_skb_any(skb);
+
+	/* If we're not expecting another transmit and we had something to push
+	 * on this queue or a partner queue then we need to push here to get the
+	 * previous packets out.
+	 */
+	if (!xmit_more) {
+		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+		if (txq2->xmit_more_available)
+			efx_nic_push_buffers(txq2);
+	}
+
 	return NETDEV_TX_OK;
 }