From patchwork Thu Aug 28 01:04:17 2014
X-Patchwork-Submitter: Benjamin Poirier
X-Patchwork-Id: 383629
X-Patchwork-Delegate: davem@davemloft.net
From: Benjamin Poirier
To: Prashant Sreedharan, Michael Chan
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH net v4 3/4] tg3: Move tx queue stop logic to its own function
Date: Wed, 27 Aug 2014 18:04:17 -0700
Message-Id: <1409187858-7698-3-git-send-email-bpoirier@suse.de>
In-Reply-To: <1409187858-7698-1-git-send-email-bpoirier@suse.de>
References: <1409187858-7698-1-git-send-email-bpoirier@suse.de>

The tx queue stop logic is open-coded in three places. Moreover, the
first instance, in tg3_start_xmit(), is racy. Consider:

tg3_start_xmit()
	if budget <= ...
					tg3_tx()
						(free up the entire ring)
						tx_cons =
						smp_mb
						if queue_stopped and tx_avail, NO
	if !queue_stopped
		stop queue
	return NETDEV_TX_BUSY

... tx queue stopped forever

Signed-off-by: Benjamin Poirier
---
Changes v2->v3
* new patch, to avoid repeatedly open-coding this block in the next patch

Changes v3->v4
* added a comment to clarify the return value, as suggested
* replaced the BUG_ON with netdev_err(). No need to be so dramatic; this
  situation will trigger a netdev watchdog anyway.
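[Note, not part of the patch: the interleaving above can be reproduced in
a stand-alone userspace model. The sketch below uses C11 atomics;
ring_avail, queue_stopped, xmit_racy() and tx_complete() are illustrative
stand-ins for tg3_tx_avail(), the netif_tx_queue_* state,
tg3_start_xmit() and tg3_tx(), and atomic_thread_fence() stands in for
smp_mb(). It is a model of the race, not driver code.]

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int ring_avail;		/* models tg3_tx_avail() */
static atomic_bool queue_stopped;	/* models netif_tx_queue_stopped() */

/* Old xmit-side logic: stop the queue without re-checking availability.
 * The comparison against a stale availability snapshot models
 * "budget <= skb_shinfo(skb)->nr_frags + 1" in the old tg3_start_xmit().
 */
static bool xmit_racy(int needed)
{
	if (atomic_load(&ring_avail) <= needed) {
		/* <-- race window: if tx_complete() frees the whole ring
		 * here, it sees queue_stopped == false and skips the wake */
		if (!atomic_load(&queue_stopped))
			atomic_store(&queue_stopped, true);
		return false;	/* NETDEV_TX_BUSY, queue stopped forever */
	}
	return true;
}

/* Completion side, mirroring the order of operations in tg3_tx():
 * update the consumer index first, then wake only an already-stopped
 * queue. */
static void tx_complete(int freed, int wakeup_thresh)
{
	atomic_fetch_add(&ring_avail, freed);		/* tx_cons = ... */
	atomic_thread_fence(memory_order_seq_cst);	/* models smp_mb() */
	if (atomic_load(&queue_stopped) &&
	    atomic_load(&ring_avail) > wakeup_thresh)
		atomic_store(&queue_stopped, false);	/* wake queue */
}

If tx_complete() runs inside the marked window, it finds the queue not
yet stopped and skips the wake; xmit_racy() then stops the queue and
never re-checks, matching the "tx queue stopped forever" outcome above.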
---
 drivers/net/ethernet/broadcom/tg3.c | 75 ++++++++++++++++++++-----------------
 1 file changed, 40 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0cecd6d..f706a1e 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7831,6 +7831,35 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
+/* Returns true if the queue has been stopped. Note that it may have been
+ * restarted since.
+ */
+static inline bool tg3_maybe_stop_txq(struct tg3_napi *tnapi,
+				      struct netdev_queue *txq,
+				      u32 stop_thresh, u32 wakeup_thresh)
+{
+	bool stopped = false;
+
+	if (unlikely(tg3_tx_avail(tnapi) <= stop_thresh)) {
+		if (!netif_tx_queue_stopped(txq)) {
+			stopped = true;
+			netif_tx_stop_queue(txq);
+			if (wakeup_thresh >= tnapi->tx_pending)
+				netdev_err(tnapi->tp->dev,
+					   "BUG! wakeup_thresh too large (%u >= %u)\n",
+					   wakeup_thresh, tnapi->tx_pending);
+		}
+		/* netif_tx_stop_queue() must be done before checking tx index
+		 * in tg3_tx_avail(), because in tg3_tx(), we update tx index
+		 * before checking for netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (tg3_tx_avail(tnapi) > wakeup_thresh)
+			netif_tx_wake_queue(txq);
+	}
+	return stopped;
+}
+
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
  * indicated in tg3_tx_frag_set()
  */
@@ -7841,20 +7870,9 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
-		netif_tx_stop_queue(txq);
-
-		/* netif_tx_stop_queue() must be done before checking
-		 * checking tx index in tg3_tx_avail() below, because in
-		 * tg3_tx(), we update tx index before checking for
-		 * netif_tx_queue_stopped().
-		 */
-		smp_mb();
-		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
-			return NETDEV_TX_BUSY;
-
-		netif_tx_wake_queue(txq);
-	}
+	tg3_maybe_stop_txq(tnapi, txq, frag_cnt_est, frag_cnt_est);
+	if (netif_tx_queue_stopped(txq))
+		return NETDEV_TX_BUSY;
 
 	segs = skb_gso_segment(skb, tp->dev->features &
 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
@@ -7902,16 +7920,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt. Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either. Rejoice!
 	 */
-	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
-		if (!netif_tx_queue_stopped(txq)) {
-			netif_tx_stop_queue(txq);
-
-			/* This is a hard error, log it. */
-			netdev_err(dev,
-				   "BUG! Tx Ring full when queue awake!\n");
-		}
-		return NETDEV_TX_BUSY;
+	if (tg3_maybe_stop_txq(tnapi, txq, skb_shinfo(skb)->nr_frags + 1,
+			       TG3_TX_WAKEUP_THRESH(tnapi))) {
+		/* This is a hard error, log it. */
+		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 	}
+	if (netif_tx_queue_stopped(txq))
+		return NETDEV_TX_BUSY;
 
 	entry = tnapi->tx_prod;
 	base_flags = 0;
@@ -8087,18 +8102,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox(tnapi->prodmbox, entry);
 
 	tnapi->tx_prod = entry;
-	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
-		netif_tx_stop_queue(txq);
-
-		/* netif_tx_stop_queue() must be done before checking
-		 * checking tx index in tg3_tx_avail() below, because in
-		 * tg3_tx(), we update tx index before checking for
-		 * netif_tx_queue_stopped().
-		 */
-		smp_mb();
-		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
-			netif_tx_wake_queue(txq);
-	}
+	tg3_maybe_stop_txq(tnapi, txq, MAX_SKB_FRAGS + 1,
+			   TG3_TX_WAKEUP_THRESH(tnapi));
 
 	mmiowb();
 	return NETDEV_TX_OK;
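[Note, continuing the userspace model from above (same globals and
headers): here is the fixed protocol as implemented by
tg3_maybe_stop_txq(): stop first, issue a full barrier, then re-check
availability and self-wake. Again a sketch under the same illustrative
names, not driver code.]

/* Fixed xmit-side logic, modeling tg3_maybe_stop_txq() above. */
static bool maybe_stop_txq(int stop_thresh, int wakeup_thresh)
{
	bool stopped = false;

	if (atomic_load(&ring_avail) <= stop_thresh) {
		if (!atomic_load(&queue_stopped)) {
			stopped = true;
			atomic_store(&queue_stopped, true); /* stop first... */
		}
		/* Pairs with the fence in tx_complete(): the stop must be
		 * visible before ring_avail is re-read (models smp_mb()). */
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load(&ring_avail) > wakeup_thresh)
			atomic_store(&queue_stopped, false); /* ...self-wake */
	}
	return stopped;
}

With this ordering, either tx_complete() observes the stopped queue and
wakes it, or this path observes the freed descriptors and wakes itself;
the lost-wakeup window is closed. It also shows why the helper's
netdev_err() insists on wakeup_thresh < tx_pending: tg3_tx_avail() can
never exceed tx_pending, so a larger threshold could leave even a fully
drained ring below the wake condition.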