diff mbox series

[PATCHv2,2/2] forcedeth: disable recv cache by default

Message ID 1563713633-25528-3-git-send-email-yanjun.zhu@oracle.com
State Rejected
Delegated to: David Miller
Headers show
Series forcedeth: recv cache to make NIC work steadily | expand

Commit Message

Zhu Yanjun July 21, 2019, 12:53 p.m. UTC
The recv cache allocates 125 MiB of memory reserved for the NIC.
Historically this recv cache has worked very well: when system memory
runs low, the cache keeps memory reserved for the NIC, so
communication through the NIC is unaffected by the memory shortage,
and NIC performance is improved as a side effect.
However, this recv cache reserves 125 MiB per NIC port, and a card
normally has 2 ports, so a host ends up with about 250 MiB reserved
for NIC ports. On hosts where network communication is not critical,
reserving that memory is unnecessary.
Therefore the recv cache is now disabled by default.

CC: Joe Jin <joe.jin@oracle.com>
CC: Junxiao Bi <junxiao.bi@oracle.com>
Tested-by: Nan san <nan.1986san@gmail.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
---
 drivers/net/ethernet/nvidia/Kconfig     | 11 ++++++++
 drivers/net/ethernet/nvidia/Makefile    |  1 +
 drivers/net/ethernet/nvidia/forcedeth.c | 46 ++++++++++++++++++++++++++-------
 3 files changed, 48 insertions(+), 10 deletions(-)

Comments

Andrew Lunn July 21, 2019, 2:48 p.m. UTC | #1
On Sun, Jul 21, 2019 at 08:53:53AM -0400, Zhu Yanjun wrote:
> The recv cache is to allocate 125MiB memory to reserve for NIC.
> In the past time, this recv cache works very well. When the memory
> is not enough, this recv cache reserves memory for NIC.
> And the communications through this NIC is not affected by the
> memory shortage. And the performance of NIC is better because of
> this recv cache.
> But this recv cache reserves 125MiB memory for one NIC port. Normally
> there are 2 NIC ports in one card. So in a host, there are about 250
> MiB memory reserved for NIC ports. To a host on which communications
> are not mandatory, it is not necessary to reserve memory.
> So this recv cache is disabled by default.
> 
> CC: Joe Jin <joe.jin@oracle.com>
> CC: Junxiao Bi <junxiao.bi@oracle.com>
> Tested-by: Nan san <nan.1986san@gmail.com>
> Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
> ---
>  drivers/net/ethernet/nvidia/Kconfig     | 11 ++++++++
>  drivers/net/ethernet/nvidia/Makefile    |  1 +
>  drivers/net/ethernet/nvidia/forcedeth.c | 46 ++++++++++++++++++++++++++-------
>  3 files changed, 48 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/net/ethernet/nvidia/Kconfig b/drivers/net/ethernet/nvidia/Kconfig
> index faacbd1..9a9f42a 100644
> --- a/drivers/net/ethernet/nvidia/Kconfig
> +++ b/drivers/net/ethernet/nvidia/Kconfig
> @@ -26,4 +26,15 @@ config FORCEDETH
>  	  To compile this driver as a module, choose M here. The module
>  	  will be called forcedeth.
>  
> +config	FORCEDETH_RECV_CACHE
> +	bool "nForce Ethernet recv cache support"
> +	depends on FORCEDETH
> +	default n
> +	---help---
> +	  The recv cache can make nic work steadily when the system memory is
> +	  not enough. And it can also enhance nic performance. But to a host
> +	  on which the communications are not mandatory, it is not necessary
> +	  to reserve 125MiB memory for NIC.
> +	  So recv cache is disabled by default.
> +
>  endif # NET_VENDOR_NVIDIA
> diff --git a/drivers/net/ethernet/nvidia/Makefile b/drivers/net/ethernet/nvidia/Makefile
> index 8935699..40c055e 100644
> --- a/drivers/net/ethernet/nvidia/Makefile
> +++ b/drivers/net/ethernet/nvidia/Makefile
> @@ -4,3 +4,4 @@
>  #
>  
>  obj-$(CONFIG_FORCEDETH) += forcedeth.o
> +ccflags-$(CONFIG_FORCEDETH_RECV_CACHE)	:=	-DFORCEDETH_RECV_CACHE
> diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
> index f8e766f..deda276 100644
> --- a/drivers/net/ethernet/nvidia/forcedeth.c
> +++ b/drivers/net/ethernet/nvidia/forcedeth.c
> @@ -674,10 +674,12 @@ struct nv_ethtool_stats {
>  	u64 tx_broadcast;
>  };
>  
> +#ifdef FORCEDETH_RECV_CACHE
>  /* 1000Mb is 125M bytes, 125 * 1024 * 1024 bytes
>   * The length of recv cache is 125M / skb_length
>   */
>  #define RECV_CACHE_LIST_LENGTH		(125 * 1024 * 1024 / np->rx_buf_sz)
> +#endif
>  
>  #define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
>  #define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
> @@ -850,10 +852,12 @@ struct fe_priv {
>  	char name_tx[IFNAMSIZ + 3];       /* -tx    */
>  	char name_other[IFNAMSIZ + 6];    /* -other */
>  
> +#ifdef FORCEDETH_RECV_CACHE
>  	/* This is to schedule work */
>  	struct delayed_work     recv_cache_work;
>  	/* This list is to store skb queue for recv */
>  	struct sk_buff_head recv_list;
> +#endif
>  };
>  
>  /*
> @@ -1814,8 +1818,12 @@ static int nv_alloc_rx(struct net_device *dev)
>  		less_rx = np->last_rx.orig;
>  
>  	while (np->put_rx.orig != less_rx) {
> +#ifdef FORCEDETH_RECV_CACHE
>  		struct sk_buff *skb = skb_dequeue(&np->recv_list);
> -
> +#else
> +		struct sk_buff *skb = netdev_alloc_skb(np->dev,
> +					 np->rx_buf_sz + NV_RX_ALLOC_PAD);
> +#endif
>  		if (likely(skb)) {
>  			np->put_rx_ctx->skb = skb;
>  			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
> @@ -1840,15 +1848,15 @@ static int nv_alloc_rx(struct net_device *dev)
>  			u64_stats_update_begin(&np->swstats_rx_syncp);
>  			np->stat_rx_dropped++;
>  			u64_stats_update_end(&np->swstats_rx_syncp);
> -
> +#ifdef FORCEDETH_RECV_CACHE
>  			schedule_delayed_work(&np->recv_cache_work, 0);
> -
> +#endif

All these #ifdef are pretty ugly. They also make the code easy to break,
since most of the time this option will not be enabled. Please
refactor the code so that it uses

if (IS_ENABLED(CONFIG_FORCEDETH_RECV_CACHE))

so that the compiler at least compiles the code every time, and then
optimizes it out.

	   Andrew
diff mbox series

Patch

diff --git a/drivers/net/ethernet/nvidia/Kconfig b/drivers/net/ethernet/nvidia/Kconfig
index faacbd1..9a9f42a 100644
--- a/drivers/net/ethernet/nvidia/Kconfig
+++ b/drivers/net/ethernet/nvidia/Kconfig
@@ -26,4 +26,15 @@  config FORCEDETH
 	  To compile this driver as a module, choose M here. The module
 	  will be called forcedeth.
 
+config	FORCEDETH_RECV_CACHE
+	bool "nForce Ethernet recv cache support"
+	depends on FORCEDETH
+	default n
+	---help---
+	  The recv cache can make nic work steadily when the system memory is
+	  not enough. And it can also enhance nic performance. But to a host
+	  on which the communications are not mandatory, it is not necessary
+	  to reserve 125MiB memory for NIC.
+	  So recv cache is disabled by default.
+
 endif # NET_VENDOR_NVIDIA
diff --git a/drivers/net/ethernet/nvidia/Makefile b/drivers/net/ethernet/nvidia/Makefile
index 8935699..40c055e 100644
--- a/drivers/net/ethernet/nvidia/Makefile
+++ b/drivers/net/ethernet/nvidia/Makefile
@@ -4,3 +4,4 @@ 
 #
 
 obj-$(CONFIG_FORCEDETH) += forcedeth.o
+ccflags-$(CONFIG_FORCEDETH_RECV_CACHE)	:=	-DFORCEDETH_RECV_CACHE
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index f8e766f..deda276 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -674,10 +674,12 @@  struct nv_ethtool_stats {
 	u64 tx_broadcast;
 };
 
+#ifdef FORCEDETH_RECV_CACHE
 /* 1000Mb is 125M bytes, 125 * 1024 * 1024 bytes
  * The length of recv cache is 125M / skb_length
  */
 #define RECV_CACHE_LIST_LENGTH		(125 * 1024 * 1024 / np->rx_buf_sz)
+#endif
 
 #define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
 #define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
@@ -850,10 +852,12 @@  struct fe_priv {
 	char name_tx[IFNAMSIZ + 3];       /* -tx    */
 	char name_other[IFNAMSIZ + 6];    /* -other */
 
+#ifdef FORCEDETH_RECV_CACHE
 	/* This is to schedule work */
 	struct delayed_work     recv_cache_work;
 	/* This list is to store skb queue for recv */
 	struct sk_buff_head recv_list;
+#endif
 };
 
 /*
@@ -1814,8 +1818,12 @@  static int nv_alloc_rx(struct net_device *dev)
 		less_rx = np->last_rx.orig;
 
 	while (np->put_rx.orig != less_rx) {
+#ifdef FORCEDETH_RECV_CACHE
 		struct sk_buff *skb = skb_dequeue(&np->recv_list);
-
+#else
+		struct sk_buff *skb = netdev_alloc_skb(np->dev,
+					 np->rx_buf_sz + NV_RX_ALLOC_PAD);
+#endif
 		if (likely(skb)) {
 			np->put_rx_ctx->skb = skb;
 			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
@@ -1840,15 +1848,15 @@  static int nv_alloc_rx(struct net_device *dev)
 			u64_stats_update_begin(&np->swstats_rx_syncp);
 			np->stat_rx_dropped++;
 			u64_stats_update_end(&np->swstats_rx_syncp);
-
+#ifdef FORCEDETH_RECV_CACHE
 			schedule_delayed_work(&np->recv_cache_work, 0);
-
+#endif
 			return 1;
 		}
 	}
-
+#ifdef FORCEDETH_RECV_CACHE
 	schedule_delayed_work(&np->recv_cache_work, 0);
-
+#endif
 	return 0;
 }
 
@@ -1862,7 +1870,12 @@  static int nv_alloc_rx_optimized(struct net_device *dev)
 		less_rx = np->last_rx.ex;
 
 	while (np->put_rx.ex != less_rx) {
+#ifdef FORCEDETH_RECV_CACHE
 		struct sk_buff *skb = skb_dequeue(&np->recv_list);
+#else
+		struct sk_buff *skb = netdev_alloc_skb(np->dev,
+					np->rx_buf_sz + NV_RX_ALLOC_PAD);
+#endif
 
 		if (likely(skb)) {
 			np->put_rx_ctx->skb = skb;
@@ -1889,15 +1902,15 @@  static int nv_alloc_rx_optimized(struct net_device *dev)
 			u64_stats_update_begin(&np->swstats_rx_syncp);
 			np->stat_rx_dropped++;
 			u64_stats_update_end(&np->swstats_rx_syncp);
-
+#ifdef FORCEDETH_RECV_CACHE
 			schedule_delayed_work(&np->recv_cache_work, 0);
-
+#endif
 			return 1;
 		}
 	}
-
+#ifdef FORCEDETH_RECV_CACHE
 	schedule_delayed_work(&np->recv_cache_work, 0);
-
+#endif
 	return 0;
 }
 
@@ -1981,6 +1994,7 @@  static void nv_init_tx(struct net_device *dev)
 	}
 }
 
+#ifdef FORCEDETH_RECV_CACHE
 static void nv_init_recv_cache(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2017,6 +2031,7 @@  static void nv_destroy_recv_cache(struct net_device *dev)
 	skb_queue_purge(&np->recv_list);
 	WARN_ON(skb_queue_len(&np->recv_list));
 }
+#endif
 
 static int nv_init_ring(struct net_device *dev)
 {
@@ -3108,8 +3123,10 @@  static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_drain_rxtx(dev);
 		/* reinit driver view of the rx queue */
 		set_bufsize(dev);
+#ifdef FORCEDETH_RECV_CACHE
 		nv_destroy_recv_cache(dev);
 		nv_init_recv_cache(dev);
+#endif
 		if (nv_init_ring(dev)) {
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -4137,6 +4154,7 @@  static void nv_free_irq(struct net_device *dev)
 	}
 }
 
+#ifdef FORCEDETH_RECV_CACHE
 static void nv_recv_cache_worker(struct work_struct *work)
 {
 	struct fe_priv *np = container_of(work, struct fe_priv,
@@ -4162,6 +4180,7 @@  static void nv_recv_cache_worker(struct work_struct *work)
 		skb_queue_tail(&np->recv_list, skb);
 	}
 }
+#endif
 
 static void nv_do_nic_poll(struct timer_list *t)
 {
@@ -4218,8 +4237,10 @@  static void nv_do_nic_poll(struct timer_list *t)
 			nv_drain_rxtx(dev);
 			/* reinit driver view of the rx queue */
 			set_bufsize(dev);
+#ifdef FORCEDETH_RECV_CACHE
 			nv_destroy_recv_cache(dev);
 			nv_init_recv_cache(dev);
+#endif
 			if (nv_init_ring(dev)) {
 				if (!np->in_shutdown)
 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -4772,8 +4793,10 @@  static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	if (netif_running(dev)) {
 		/* reinit driver view of the queues */
 		set_bufsize(dev);
+#ifdef FORCEDETH_RECV_CACHE
 		nv_destroy_recv_cache(dev);
 		nv_init_recv_cache(dev);
+#endif
 		if (nv_init_ring(dev)) {
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -5495,9 +5518,11 @@  static int nv_open(struct net_device *dev)
 
 	/* initialize descriptor rings */
 	set_bufsize(dev);
+#ifdef FORCEDETH_RECV_CACHE
 	nv_init_recv_cache(dev);
 
 	INIT_DELAYED_WORK(&np->recv_cache_work, nv_recv_cache_worker);
+#endif
 	oom = nv_init_ring(dev);
 
 	writel(0, base + NvRegLinkSpeed);
@@ -5679,9 +5704,10 @@  static int nv_close(struct net_device *dev)
 		nv_txrx_gate(dev, true);
 	}
 
+#ifdef FORCEDETH_RECV_CACHE
 	/* free all SKBs in recv cache */
 	nv_destroy_recv_cache(dev);
-
+#endif
 	/* FIXME: power down nic */
 
 	return 0;