@@ -26,4 +26,15 @@ config FORCEDETH
To compile this driver as a module, choose M here. The module
will be called forcedeth.
+config FORCEDETH_RECV_CACHE
+ bool "nForce Ethernet recv cache support"
+ depends on FORCEDETH
+ default n
+	help
+	  The recv cache keeps the NIC working reliably when system
+	  memory is low, and it can also improve NIC performance.
+	  However, it reserves 125 MiB of memory for the NIC, which is
+	  unnecessary on hosts where networking is not critical, so the
+	  recv cache is disabled by default.
+
endif # NET_VENDOR_NVIDIA
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_FORCEDETH) += forcedeth.o
+ccflags-$(CONFIG_FORCEDETH_RECV_CACHE) += -DFORCEDETH_RECV_CACHE
@@ -674,10 +674,12 @@ struct nv_ethtool_stats {
u64 tx_broadcast;
};
+#ifdef FORCEDETH_RECV_CACHE
/* 1000Mb is 125M bytes, 125 * 1024 * 1024 bytes
* The length of recv cache is 125M / skb_length
*/
#define RECV_CACHE_LIST_LENGTH (125 * 1024 * 1024 / np->rx_buf_sz)
+#endif
#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
@@ -850,10 +852,12 @@ struct fe_priv {
char name_tx[IFNAMSIZ + 3]; /* -tx */
char name_other[IFNAMSIZ + 6]; /* -other */
+#ifdef FORCEDETH_RECV_CACHE
+	/* Delayed work that refills the recv cache (recv_list) in the background */
struct delayed_work recv_cache_work;
+	/* Queue of pre-allocated skbs used to refill the rx ring */
struct sk_buff_head recv_list;
+#endif
};
/*
@@ -1814,8 +1818,12 @@ static int nv_alloc_rx(struct net_device *dev)
less_rx = np->last_rx.orig;
while (np->put_rx.orig != less_rx) {
+#ifdef FORCEDETH_RECV_CACHE
struct sk_buff *skb = skb_dequeue(&np->recv_list);
-
+#else
+ struct sk_buff *skb = netdev_alloc_skb(np->dev,
+ np->rx_buf_sz + NV_RX_ALLOC_PAD);
+#endif
if (likely(skb)) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
@@ -1840,15 +1848,15 @@ static int nv_alloc_rx(struct net_device *dev)
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_dropped++;
u64_stats_update_end(&np->swstats_rx_syncp);
-
+#ifdef FORCEDETH_RECV_CACHE
schedule_delayed_work(&np->recv_cache_work, 0);
-
+#endif
return 1;
}
}
-
+#ifdef FORCEDETH_RECV_CACHE
schedule_delayed_work(&np->recv_cache_work, 0);
-
+#endif
return 0;
}
@@ -1862,7 +1870,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
less_rx = np->last_rx.ex;
while (np->put_rx.ex != less_rx) {
+#ifdef FORCEDETH_RECV_CACHE
struct sk_buff *skb = skb_dequeue(&np->recv_list);
+#else
+ struct sk_buff *skb = netdev_alloc_skb(np->dev,
+ np->rx_buf_sz + NV_RX_ALLOC_PAD);
+#endif
if (likely(skb)) {
np->put_rx_ctx->skb = skb;
@@ -1889,15 +1902,15 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_dropped++;
u64_stats_update_end(&np->swstats_rx_syncp);
-
+#ifdef FORCEDETH_RECV_CACHE
schedule_delayed_work(&np->recv_cache_work, 0);
-
+#endif
return 1;
}
}
-
+#ifdef FORCEDETH_RECV_CACHE
schedule_delayed_work(&np->recv_cache_work, 0);
-
+#endif
return 0;
}
@@ -1981,6 +1994,7 @@ static void nv_init_tx(struct net_device *dev)
}
}
+#ifdef FORCEDETH_RECV_CACHE
static void nv_init_recv_cache(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
@@ -2017,6 +2031,7 @@ static void nv_destroy_recv_cache(struct net_device *dev)
skb_queue_purge(&np->recv_list);
WARN_ON(skb_queue_len(&np->recv_list));
}
+#endif
static int nv_init_ring(struct net_device *dev)
{
@@ -3108,8 +3123,10 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
nv_drain_rxtx(dev);
/* reinit driver view of the rx queue */
set_bufsize(dev);
+#ifdef FORCEDETH_RECV_CACHE
nv_destroy_recv_cache(dev);
nv_init_recv_cache(dev);
+#endif
if (nv_init_ring(dev)) {
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -4137,6 +4154,7 @@ static void nv_free_irq(struct net_device *dev)
}
}
+#ifdef FORCEDETH_RECV_CACHE
static void nv_recv_cache_worker(struct work_struct *work)
{
struct fe_priv *np = container_of(work, struct fe_priv,
@@ -4162,6 +4180,7 @@ static void nv_recv_cache_worker(struct work_struct *work)
skb_queue_tail(&np->recv_list, skb);
}
}
+#endif
static void nv_do_nic_poll(struct timer_list *t)
{
@@ -4218,8 +4237,10 @@ static void nv_do_nic_poll(struct timer_list *t)
nv_drain_rxtx(dev);
/* reinit driver view of the rx queue */
set_bufsize(dev);
+#ifdef FORCEDETH_RECV_CACHE
nv_destroy_recv_cache(dev);
nv_init_recv_cache(dev);
+#endif
if (nv_init_ring(dev)) {
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -4772,8 +4793,10 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
if (netif_running(dev)) {
/* reinit driver view of the queues */
set_bufsize(dev);
+#ifdef FORCEDETH_RECV_CACHE
nv_destroy_recv_cache(dev);
nv_init_recv_cache(dev);
+#endif
if (nv_init_ring(dev)) {
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -5495,9 +5518,11 @@ static int nv_open(struct net_device *dev)
/* initialize descriptor rings */
set_bufsize(dev);
+#ifdef FORCEDETH_RECV_CACHE
nv_init_recv_cache(dev);
INIT_DELAYED_WORK(&np->recv_cache_work, nv_recv_cache_worker);
+#endif
oom = nv_init_ring(dev);
writel(0, base + NvRegLinkSpeed);
@@ -5679,9 +5704,10 @@ static int nv_close(struct net_device *dev)
nv_txrx_gate(dev, true);
}
+#ifdef FORCEDETH_RECV_CACHE
/* free all SKBs in recv cache */
nv_destroy_recv_cache(dev);
-
+#endif
/* FIXME: power down nic */
return 0;