From patchwork Tue Aug 26 19:26:17 2014 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Benjamin Poirier X-Patchwork-Id: 383192 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id C4CBC14003E for ; Wed, 27 Aug 2014 05:27:50 +1000 (EST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1754171AbaHZT1a (ORCPT ); Tue, 26 Aug 2014 15:27:30 -0400 Received: from mail-pd0-f178.google.com ([209.85.192.178]:48106 "EHLO mail-pd0-f178.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1753870AbaHZT12 (ORCPT ); Tue, 26 Aug 2014 15:27:28 -0400 Received: by mail-pd0-f178.google.com with SMTP id w10so23228440pde.37 for ; Tue, 26 Aug 2014 12:27:27 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20120113; h=sender:from:to:cc:subject:date:message-id:in-reply-to:references; bh=gJu+iyAfsG+Epx+S/3nGEutU0+kZkDLgOT5X2VZRcvo=; b=TDgSUPlA95BO5rrti0hRU4l2BDY+IFA7qWWwdZXfx3wy10EVtZNvK8t3uiqWTdE1FC XcmOd4vhcjyP5CL7xmIduLQq3pyiDlBztqKmMZGRUMwZ0MkMQQYF9Xtd+Fd+iMvhffTF bb3/gV6EJgJ5H/PQYDaoEmlQvdOhGU98sOOlEGQDDBo01kZV28PobGbXycfUaAePu+w5 hVuKCCE3fajptsM85uZ6hRXgCrACM26crdBbhUzXn+kRrZZXY5YkSw5M1/qtKPK443xF iJRtcNlrcOYVCJ46921OkaYd4TJeP2po5I25IaxliJxozxxU/RhnCdIOFjujVBXx2NW6 6vYg== X-Received: by 10.66.237.206 with SMTP id ve14mr19529851pac.40.1409081247695; Tue, 26 Aug 2014 12:27:27 -0700 (PDT) Received: from f1.synalogic.ca ([108.203.76.248]) by mx.google.com with ESMTPSA id zf5sm4049240pbc.44.2014.08.26.12.27.26 for (version=TLSv1.2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Tue, 26 Aug 2014 12:27:27 -0700 (PDT) From: Benjamin Poirier To: Prashant Sreedharan , Michael Chan Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org Subject: [PATCH net v3 
3/4] tg3: Move tx queue stop logic to its own function Date: Tue, 26 Aug 2014 12:26:17 -0700 Message-Id: <1409081178-4877-3-git-send-email-bpoirier@suse.de> X-Mailer: git-send-email 1.8.4.5 In-Reply-To: <1409081178-4877-1-git-send-email-bpoirier@suse.de> References: <1409081178-4877-1-git-send-email-bpoirier@suse.de> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This tx queue stop logic is duplicated. Also, the first instance in tg3_start_xmit() is racy. Consider: tg3_start_xmit() if budget <= ... tg3_tx() (free up the entire ring) tx_cons = smp_mb if queue_stopped and tx_avail, NO if !queue_stopped stop queue return NETDEV_TX_BUSY ... tx queue stopped forever Signed-off-by: Benjamin Poirier --- Changes v2->v3 * new patch to avoid repeatedly open coding this block in the next patch. --- drivers/net/ethernet/broadcom/tg3.c | 69 ++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 35 deletions(-) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 0cecd6d..5d39554 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7831,6 +7831,29 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); +static inline bool tg3_maybe_stop_txq(struct tg3_napi *tnapi, + struct netdev_queue *txq, + u32 stop_thresh, u32 wakeup_thresh) +{ + bool stopped = false; + + if (unlikely(tg3_tx_avail(tnapi) <= stop_thresh)) { + if (!netif_tx_queue_stopped(txq)) { + stopped = true; + netif_tx_stop_queue(txq); + BUG_ON(wakeup_thresh >= tnapi->tx_pending); + } + /* netif_tx_stop_queue() must be done before checking tx index + * in tg3_tx_avail(), because in tg3_tx(), we update tx index + * before checking for netif_tx_queue_stopped(). 
+ */ + smp_mb(); + if (tg3_tx_avail(tnapi) > wakeup_thresh) + netif_tx_wake_queue(txq); + } + return stopped; +} + /* Use GSO to workaround all TSO packets that meet HW bug conditions * indicated in tg3_tx_frag_set() */ @@ -7841,20 +7864,9 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; /* Estimate the number of fragments in the worst case */ - if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) { - netif_tx_stop_queue(txq); - - /* netif_tx_stop_queue() must be done before checking - * checking tx index in tg3_tx_avail() below, because in - * tg3_tx(), we update tx index before checking for - * netif_tx_queue_stopped(). - */ - smp_mb(); - if (tg3_tx_avail(tnapi) <= frag_cnt_est) - return NETDEV_TX_BUSY; - - netif_tx_wake_queue(txq); - } + tg3_maybe_stop_txq(tnapi, txq, frag_cnt_est, frag_cnt_est); + if (netif_tx_queue_stopped(txq)) + return NETDEV_TX_BUSY; segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6)); @@ -7902,16 +7914,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! */ - if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { - if (!netif_tx_queue_stopped(txq)) { - netif_tx_stop_queue(txq); - - /* This is a hard error, log it. */ - netdev_err(dev, - "BUG! Tx Ring full when queue awake!\n"); - } - return NETDEV_TX_BUSY; + if (tg3_maybe_stop_txq(tnapi, txq, skb_shinfo(skb)->nr_frags + 1, + TG3_TX_WAKEUP_THRESH(tnapi))) { + /* This is a hard error, log it. */ + netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); } + if (netif_tx_queue_stopped(txq)) + return NETDEV_TX_BUSY; entry = tnapi->tx_prod; base_flags = 0; @@ -8087,18 +8096,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) tw32_tx_mbox(tnapi->prodmbox, entry); tnapi->tx_prod = entry; - if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { - netif_tx_stop_queue(txq); - - /* netif_tx_stop_queue() must be done before checking - * checking tx index in tg3_tx_avail() below, because in - * tg3_tx(), we update tx index before checking for - * netif_tx_queue_stopped(). - */ - smp_mb(); - if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) - netif_tx_wake_queue(txq); - } + tg3_maybe_stop_txq(tnapi, txq, MAX_SKB_FRAGS + 1, + TG3_TX_WAKEUP_THRESH(tnapi)); mmiowb(); return NETDEV_TX_OK;