@@ -130,6 +130,7 @@ struct mlxbf_gige {
u8 hw_version;
struct mlxbf_gige_mdio_gw *mdio_gw;
int prev_speed;
+ u32 msg_enable;
struct timer_list media_timer;
struct work_struct phy_task;
u8 aneg_timeout;
@@ -211,5 +212,6 @@ void mlxbf_gige_free_irqs(struct mlxbf_gige *priv);
int mlxbf_gige_poll(struct napi_struct *napi, int budget);
extern const struct ethtool_ops mlxbf_gige_ethtool_ops;
void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv);
+void mlxbf_gige_dump_rx_tx(const char *str, struct mlxbf_gige *priv);
#endif /* !defined(__MLXBF_GIGE_H__) */
@@ -193,6 +193,18 @@ static void mlxbf_gige_get_pause_stats(struct net_device *netdev,
}
}
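+/* Get/set hooks exposing priv->msg_enable through the ethtool
+ * message-level interface (e.g. "ethtool -s <dev> msglvl <mask>").
+ */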
+static u32 mlxbf_gige_get_msglevel(struct net_device *netdev)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static void mlxbf_gige_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+
+ priv->msg_enable = data;
+}
+
const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = mlxbf_gige_get_ringparam,
@@ -207,4 +219,6 @@ const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.get_pause_stats = mlxbf_gige_get_pause_stats,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = mlxbf_gige_get_msglevel,
+ .set_msglevel = mlxbf_gige_set_msglevel,
};
@@ -24,6 +24,15 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
+#define MLXBF_GIGE_DEF_MSG_ENABLE \
+ (NETIF_MSG_IFUP | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_RX_ERR)
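+
+/* Module parameter seeding the message level via netif_msg_init();
+ * the default of -1 selects MLXBF_GIGE_DEF_MSG_ENABLE.
+ */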
+static int mlxbf_gige_debug = -1;
+module_param(mlxbf_gige_debug, int, 0);
+MODULE_PARM_DESC(mlxbf_gige_debug, "Debug level (0=none,...,16=all)");
+
/* Allocate SKB whose payload pointer aligns with the Bluefield
* hardware DMA limitation, i.e. DMA operation can't cross
* a 4KB boundary. A maximum packet size of 2KB is assumed in the
@@ -135,25 +144,29 @@ static void mlxbf_gige_phy_task(struct work_struct *work)
{
struct mlxbf_gige *priv = container_of(work, struct mlxbf_gige, phy_task);
struct phy_device *phydev = priv->netdev->phydev;
- int aneg_done;
+ int aneg_done, aneg_restart;
aneg_done = phy_aneg_done(phydev);
if (aneg_done == 0) {
- dev_dbg(priv->dev, "phy_task: autoneg pending, timeout=%d\n", priv->aneg_timeout);
+ if (netif_msg_link(priv))
+ dev_dbg(priv->dev, "phy_task: autoneg pending, timeout=%d\n",
+ priv->aneg_timeout);
if (priv->aneg_timeout--) {
/* Start timer to check again in one second */
priv->media_timer.expires = jiffies + HZ;
add_timer(&priv->media_timer);
} else {
/* Upon timeout, restart autoneg once */
- dev_dbg(priv->dev, "phy_task: restarting autoneg, status=0x%x\n",
- phy_restart_aneg(phydev));
+ aneg_restart = phy_restart_aneg(phydev);
+ if (netif_msg_link(priv))
+ dev_dbg(priv->dev, "phy_task: restarting autoneg, status=0x%x\n", aneg_restart);
}
} else if (aneg_done < 0) {
- dev_dbg(priv->dev, "phy_task: autoneg failed\n");
+ if (netif_msg_link(priv))
+ dev_dbg(priv->dev, "phy_task: autoneg failed\n");
} else {
- dev_dbg(priv->dev, "phy_task: autoneg succeeded\n");
+ if (netif_msg_link(priv))
+ dev_dbg(priv->dev, "phy_task: autoneg succeeded\n");
}
}
@@ -164,6 +177,20 @@ static void mlxbf_gige_phy_timer(struct timer_list *t)
schedule_work(&priv->phy_task);
}
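+/* Dump the RX/TX ring indices, interrupt mask, and RX drop and
+ * FIFO-fullness counters to the kernel log; "str" tags the call site.
+ */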
+void mlxbf_gige_dump_rx_tx(const char *str, struct mlxbf_gige *priv)
+{
+ dev_dbg(priv->dev, "%s tx_ci=0x%llx tx_pi=0x%llx rx_ci=0x%llx rx_pi=0x%llx int_mask=0x%llx\n", str,
+ readq(priv->base + MLXBF_GIGE_TX_CONSUMER_INDEX),
+ readq(priv->base + MLXBF_GIGE_TX_PRODUCER_INDEX),
+ readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI),
+ readq(priv->base + MLXBF_GIGE_RX_WQE_PI),
+ readq(priv->base + MLXBF_GIGE_INT_MASK));
+
+ dev_dbg(priv->dev, " din_drop=0x%llx rx_dma=0x%llx rx_fifo=0x%llx rx_polarity=%d\n",
+ readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER), readq(priv->base + MLXBF_GIGE_RX_DMA),
+ readq(priv->base + MLXBF_GIGE_RX_FIFO_FULLNESS), priv->valid_polarity);
+}
+
static int mlxbf_gige_open(struct net_device *netdev)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
@@ -190,8 +217,14 @@ static int mlxbf_gige_open(struct net_device *netdev)
*/
priv->valid_polarity = 0;
+ if (netif_msg_ifup(priv))
+ mlxbf_gige_dump_rx_tx("open: start state", priv);
+
phy_start(phydev);
+ if (netif_msg_ifup(priv))
+ mlxbf_gige_dump_rx_tx("open: after phy_start", priv);
+
if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) {
/* On BlueField-2 systems, the KSZ9031 PHY hardware could fail
* to complete autonegotiation and so the link remains down.
@@ -208,16 +241,26 @@ static int mlxbf_gige_open(struct net_device *netdev)
dev_err(priv->dev, "open: tx_init failed, err=0x%x\n", err);
goto phy_deinit;
}
+
+ if (netif_msg_ifup(priv))
+ mlxbf_gige_dump_rx_tx("open: after tx_init", priv);
+
err = mlxbf_gige_rx_init(priv);
if (err) {
dev_err(priv->dev, "open: rx_init failed, err=0x%x\n", err);
goto tx_deinit;
}
+
+ if (netif_msg_ifup(priv))
+ mlxbf_gige_dump_rx_tx("open: after rx_init", priv);
+
netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll, NAPI_POLL_WEIGHT);
napi_enable(&priv->napi);
netif_start_queue(netdev);
+ if (netif_msg_ifup(priv))
+ mlxbf_gige_dump_rx_tx("open: after napi", priv);
+
err = mlxbf_gige_request_irqs(priv);
if (err) {
dev_err(priv->dev, "open: request_irqs failed, err=0x%x\n", err);
@@ -242,6 +285,9 @@ static int mlxbf_gige_open(struct net_device *netdev)
writeq(int_en, priv->base + MLXBF_GIGE_INT_EN);
+ if (netif_msg_ifup(priv))
+ mlxbf_gige_dump_rx_tx("open: end state", priv);
+
return 0;
napi_deinit:
@@ -494,6 +540,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->base = base;
priv->llu_base = llu_base;
priv->plu_base = plu_base;
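+ /* Seed the message level from the "mlxbf_gige_debug" module parameter */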
+ priv->msg_enable = netif_msg_init(mlxbf_gige_debug,
+ MLXBF_GIGE_DEF_MSG_ENABLE);
if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) {
INIT_WORK(&priv->phy_task, mlxbf_gige_phy_task);
@@ -75,6 +75,7 @@
#define MLXBF_GIGE_RX_DMA 0x0580
#define MLXBF_GIGE_RX_DMA_EN BIT(0)
#define MLXBF_GIGE_RX_CQE_PACKET_CI 0x05b0
+#define MLXBF_GIGE_RX_FIFO_FULLNESS 0x05d8
#define MLXBF_GIGE_MAC_CFG 0x05e8
/* NOTE: MLXBF_GIGE_MAC_CFG is the last defined register offset,
@@ -281,6 +281,9 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += datalen;
+ if (netif_msg_rx_status(priv))
+ mlxbf_gige_dump_rx_tx("rx_packet", priv);
+
skb = priv->rx_skb[rx_pi_rem];
/* Alloc another RX SKB for this same index */
@@ -300,8 +303,12 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
*rx_wqe_addr = rx_buf_dma;
} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
+ if (netif_msg_rx_err(priv))
+ mlxbf_gige_dump_rx_tx("rx_mac_error", priv);
priv->stats.rx_mac_errors++;
} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED) {
+ if (netif_msg_rx_err(priv))
+ mlxbf_gige_dump_rx_tx("rx_truncate_error", priv);
priv->stats.rx_truncate_errors++;
}
@@ -132,8 +132,11 @@ bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv)
u16 tx_ci;
tx_status = readq(priv->base + MLXBF_GIGE_TX_STATUS);
- if (tx_status & MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL)
+ if (tx_status & MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL) {
+ if (netif_msg_tx_err(priv))
+ mlxbf_gige_dump_rx_tx("tx_fifo_full", priv);
priv->stats.tx_fifo_full++;
+ }
tx_ci = readq(priv->base + MLXBF_GIGE_TX_CONSUMER_INDEX);
stats = &priv->netdev->stats;
@@ -164,6 +167,9 @@ bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv)
mb();
}
+ if (netif_msg_tx_done(priv))
+ mlxbf_gige_dump_rx_tx("handle_tx_complete", priv);
+
/* Since the TX ring was likely just drained, check if TX queue
* had previously been stopped and now that there are TX buffers
* available the TX queue can be awakened.
@@ -203,6 +209,8 @@ netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
if (skb->len > MLXBF_GIGE_DEFAULT_BUF_SZ || skb_linearize(skb)) {
dev_kfree_skb(skb);
netdev->stats.tx_dropped++;
+ if (netif_msg_tx_err(priv))
+ mlxbf_gige_dump_rx_tx("skb_linearize fail", priv);
return NETDEV_TX_OK;
}
@@ -221,6 +229,8 @@ netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
/* Free original skb, could not alloc new aligned SKB */
dev_kfree_skb(skb);
netdev->stats.tx_dropped++;
+ if (netif_msg_tx_err(priv))
+ mlxbf_gige_dump_rx_tx("alloc_skb fail", priv);
return NETDEV_TX_OK;
}
@@ -235,6 +245,8 @@ netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
if (dma_mapping_error(priv->dev, tx_buf_dma)) {
dev_kfree_skb(skb);
netdev->stats.tx_dropped++;
+ if (netif_msg_tx_err(priv))
+ mlxbf_gige_dump_rx_tx("dma_mapping_error", priv);
return NETDEV_TX_OK;
}
}
@@ -256,6 +268,9 @@ netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
/* Write entire 2nd word of TX WQE */
*(tx_wqe_addr + 1) = word2;
+ if (netif_msg_tx_queued(priv))
+ mlxbf_gige_dump_rx_tx("start_xmit", priv);
+
spin_lock_irqsave(&priv->lock, flags);
priv->tx_skb[priv->tx_pi % priv->tx_q_entries] = tx_skb;
priv->tx_pi++;