From patchwork Fri Aug 29 20:46:30 2014
X-Patchwork-Submitter: Benjamin Poirier
X-Patchwork-Id: 384383
X-Patchwork-Delegate: davem@davemloft.net
From: Benjamin Poirier
To: Prashant Sreedharan, Michael Chan
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH net v5 3/4] tg3: Move tx queue stop logic to its own function
Date: Fri, 29 Aug 2014 13:46:30 -0700
Message-Id: <1409345191-8819-3-git-send-email-bpoirier@suse.de>
X-Mailer: git-send-email 1.8.4.5
In-Reply-To: <1409345191-8819-1-git-send-email-bpoirier@suse.de>
References: <1409345191-8819-1-git-send-email-bpoirier@suse.de>

It is duplicated. Also, the first instance in tg3_start_xmit() is racy.
Consider:

tg3_start_xmit()
	if budget <= ...

				tg3_tx() (free up the entire ring)
				tx_cons =
				smp_mb
				if queue_stopped and tx_avail, NO

	if !queue_stopped
		stop queue
		return NETDEV_TX_BUSY

... tx queue stopped forever

Signed-off-by: Benjamin Poirier
---
Changes v2->v3
* new patch to avoid repeatedly open coding this block in the next patch.

Changes v3->v4
* added a comment to clarify the return value, as suggested
* replaced the BUG_ON with netdev_err(). No need to be so dramatic, this
  situation will trigger a netdev watchdog anyway.
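For context, the stop/wake protocol that the helper centralizes looks
roughly like the sketch below. The names ring_space(), queue_stopped(),
stop_queue() and wake_queue() are simplified stand-ins for
tg3_tx_avail(), netif_tx_queue_stopped(), netif_tx_stop_queue() and
netif_tx_wake_queue(); this illustrates the pattern, it is not code from
the patch.

#include <linux/types.h>	/* u32, bool */
#include <asm/barrier.h>	/* smp_mb() */

extern u32 ring_space(void);		/* stand-in for tg3_tx_avail() */
extern bool queue_stopped(void);	/* netif_tx_queue_stopped() */
extern void stop_queue(void);		/* netif_tx_stop_queue() */
extern void wake_queue(void);		/* netif_tx_wake_queue() */

/* Producer (xmit path): stop the queue first, then re-check the ring. */
static void xmit_side(u32 stop_thresh, u32 wake_thresh)
{
	if (ring_space() <= stop_thresh) {
		stop_queue();
		smp_mb();	/* pairs with the barrier in completion_side() */
		if (ring_space() > wake_thresh)
			wake_queue();	/* completion freed space in between */
	}
}

/* Consumer (tx completion path): free ring space first, then check the
 * stopped flag.
 */
static void completion_side(u32 wake_thresh)
{
	/* ... advance tx_cons, freeing ring entries ... */
	smp_mb();	/* publish tx_cons before reading the stopped flag */
	if (queue_stopped() && ring_space() > wake_thresh)
		wake_queue();
}

The racy instance removed by this patch stopped the queue and returned
NETDEV_TX_BUSY without the post-barrier re-check, so a completion that
emptied the ring entirely before stop_queue() neither saw the queue
stopped nor left any outstanding work to trigger another completion, and
the queue stayed stopped forever.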
---
 drivers/net/ethernet/broadcom/tg3.c | 75 ++++++++++++++++++++-----------------
 1 file changed, 40 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0cecd6d..f706a1e 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7831,6 +7831,35 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
+/* Returns true if the queue has been stopped. Note that it may have been
+ * restarted since.
+ */
+static inline bool tg3_maybe_stop_txq(struct tg3_napi *tnapi,
+				      struct netdev_queue *txq,
+				      u32 stop_thresh, u32 wakeup_thresh)
+{
+	bool stopped = false;
+
+	if (unlikely(tg3_tx_avail(tnapi) <= stop_thresh)) {
+		if (!netif_tx_queue_stopped(txq)) {
+			stopped = true;
+			netif_tx_stop_queue(txq);
+			if (wakeup_thresh >= tnapi->tx_pending)
+				netdev_err(tnapi->tp->dev,
+					   "BUG! wakeup_thresh too large (%u >= %u)\n",
+					   wakeup_thresh, tnapi->tx_pending);
+		}
+		/* netif_tx_stop_queue() must be done before checking tx index
+		 * in tg3_tx_avail(), because in tg3_tx(), we update tx index
+		 * before checking for netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (tg3_tx_avail(tnapi) > wakeup_thresh)
+			netif_tx_wake_queue(txq);
+	}
+	return stopped;
+}
+
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
  * indicated in tg3_tx_frag_set()
  */
@@ -7841,20 +7870,9 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
-		netif_tx_stop_queue(txq);
-
-		/* netif_tx_stop_queue() must be done before checking
-		 * checking tx index in tg3_tx_avail() below, because in
-		 * tg3_tx(), we update tx index before checking for
-		 * netif_tx_queue_stopped().
-		 */
-		smp_mb();
-		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
-			return NETDEV_TX_BUSY;
-
-		netif_tx_wake_queue(txq);
-	}
+	tg3_maybe_stop_txq(tnapi, txq, frag_cnt_est, frag_cnt_est);
+	if (netif_tx_queue_stopped(txq))
+		return NETDEV_TX_BUSY;
 
 	segs = skb_gso_segment(skb, tp->dev->features &
 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
@@ -7902,16 +7920,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
-		if (!netif_tx_queue_stopped(txq)) {
-			netif_tx_stop_queue(txq);
-
-			/* This is a hard error, log it. */
-			netdev_err(dev,
-				   "BUG! Tx Ring full when queue awake!\n");
-		}
-		return NETDEV_TX_BUSY;
+	if (tg3_maybe_stop_txq(tnapi, txq, skb_shinfo(skb)->nr_frags + 1,
+			       TG3_TX_WAKEUP_THRESH(tnapi))) {
+		/* This is a hard error, log it. */
+		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 	}
+	if (netif_tx_queue_stopped(txq))
+		return NETDEV_TX_BUSY;
 
 	entry = tnapi->tx_prod;
 	base_flags = 0;
@@ -8087,18 +8102,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox(tnapi->prodmbox, entry);
 
 	tnapi->tx_prod = entry;
-	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
-		netif_tx_stop_queue(txq);
-
-		/* netif_tx_stop_queue() must be done before checking
-		 * checking tx index in tg3_tx_avail() below, because in
-		 * tg3_tx(), we update tx index before checking for
-		 * netif_tx_queue_stopped().
-		 */
-		smp_mb();
-		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
-			netif_tx_wake_queue(txq);
-	}
+	tg3_maybe_stop_txq(tnapi, txq, MAX_SKB_FRAGS + 1,
+			   TG3_TX_WAKEUP_THRESH(tnapi));
 
 	mmiowb();
 	return NETDEV_TX_OK;
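One usage note on the helper: the return value only says that the queue
was stopped at some point during the call; the post-barrier re-check
inside the helper, or a concurrent tg3_tx(), may already have restarted
it. A caller that must not queue the skb therefore tests the current
queue state separately, as both call sites above do. Sketch of the
pattern (stop_thresh/wake_thresh are placeholders, not additional patch
content):

	/* Stopping here is unexpected, log it as a hard error. */
	if (tg3_maybe_stop_txq(tnapi, txq, stop_thresh, wake_thresh))
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");

	/* Decide from the current state, not from the return value: the
	 * queue may have been restarted between the two calls.
	 */
	if (netif_tx_queue_stopped(txq))
		return NETDEV_TX_BUSY;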