Message ID | 20240528134846.148890-10-aleksander.lobakin@intel.com |
---|---|
State | Changes Requested |
Delegated to | Anthony Nguyen |
Series | idpf: XDP chapter I: convert Rx to libeth |
On 5/28/2024 6:48 AM, Alexander Lobakin wrote:
> Page Pool Ethtool stats are deprecated since the Netlink Page Pool
> interface introduction.
> idpf receives big changes in Rx buffer management, including &page_pool
> layout, so keeping these deprecated stats does only harm, not speaking
> of that CONFIG_IDPF selects CONFIG_PAGE_POOL_STATS unconditionally,
> while the latter is often turned off for better performance.
> Remove all the references to PP stats from the Ethtool code. The stats
> are still available in their full via the generic Netlink interface.
>

Yep. No reason to expose these twice, and blocking the ability for
someone to disable these for optimization is annoying. Good cleanup.

Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>

> @@ -876,7 +870,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
> {
> 	struct idpf_netdev_priv *np = netdev_priv(netdev);
> 	struct idpf_vport_config *vport_config;
> -	struct page_pool_stats pp_stats = { };
> 	struct idpf_vport *vport;
> 	unsigned int total = 0;
> 	unsigned int i, j;
> @@ -946,32 +939,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
> 				idpf_add_empty_queue_stats(&data, qtype);
> 			else
> 				idpf_add_queue_stats(&data, rxq, qtype);
> -
> -			/* In splitq mode, don't get page pool stats here since
> -			 * the pools are attached to the buffer queues
> -			 */
> -			if (is_splitq)
> -				continue;
> -
> -			if (rxq)
> -				page_pool_get_stats(rxq->pp, &pp_stats);
> -		}
> -	}
> -
> -	for (i = 0; i < vport->num_rxq_grp; i++) {
> -		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
> -			struct idpf_buf_queue *rxbufq =
> -				&vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
> -
> -			page_pool_get_stats(rxbufq->pp, &pp_stats);

This can just go away because the page pool itself already tracks these
and can be reported over the netlink page pool interface. Nice.

> 		}
> 	}
>
> 	for (; total < vport_config->max_q.max_rxq; total++)
> 		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
>
> -	page_pool_ethtool_stats_get(data, &pp_stats);
> -
> 	rcu_read_unlock();
>
> 	idpf_vport_ctrl_unlock(netdev);
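For reference, the counters dropped from ethtool here remain reachable through the netdev generic Netlink family. Below is a rough, untested sketch that pulls and sums them per interface using the kernel tree's YNL Python helpers; the spec path, import path, and op/attribute names are assumptions taken from the in-tree tooling, not anything this patch adds.

```python
# Hypothetical sketch: read page pool counters over the "netdev" generic
# Netlink family instead of ethtool -S. Assumes it is run from tools/net/ynl
# inside a kernel tree so that the YNL helper package ("lib") and the netdev
# spec are available at the paths used below.
import socket
from collections import Counter

from lib import YnlFamily  # tools/net/ynl/lib, as used by tools/net/ynl/cli.py


def page_pool_stats(ifname, spec="../../../Documentation/netlink/specs/netdev.yaml"):
    """Sum the per-pool counters of every page pool attached to ifname."""
    ifindex = socket.if_nametoindex(ifname)
    ynl = YnlFamily(spec)

    totals = Counter()
    # page-pool-stats-get dumps one message per pool; each message carries an
    # 'info' nest (pool id, owning ifindex) plus the counters the removed
    # ethtool code used to aggregate into struct page_pool_stats.
    for pool in ynl.dump("page-pool-stats-get", {}):
        if pool.get("info", {}).get("ifindex") != ifindex:
            continue
        for name, value in pool.items():
            if name != "info":
                totals[name] += value
    return totals


if __name__ == "__main__":
    for name, value in sorted(page_pool_stats("eth0").items()):
        print(f"{name}: {value}")
```

The in-tree CLI can produce the same raw dump directly, along the lines of ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml --dump page-pool-stats-get.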
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
index 638484c5723c..1f071143d992 100644
--- a/drivers/net/ethernet/intel/idpf/Kconfig
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -7,7 +7,6 @@ config IDPF
 	select DIMLIB
 	select LIBETH
 	select PAGE_POOL
-	select PAGE_POOL_STATS
 	help
 	  This driver supports Intel(R) Infrastructure Data Path Function
 	  devices.
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index e933fed16c7e..3806ddd3ce4a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -565,8 +565,6 @@ static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
 	for (i = 0; i < vport_config->max_q.max_rxq; i++)
 		idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
 				       "rx", i);
-
-	page_pool_ethtool_stats_get_strings(data);
 }
 
 /**
@@ -600,7 +598,6 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
 	struct idpf_netdev_priv *np = netdev_priv(netdev);
 	struct idpf_vport_config *vport_config;
 	u16 max_txq, max_rxq;
-	unsigned int size;
 
 	if (sset != ETH_SS_STATS)
 		return -EINVAL;
@@ -619,11 +616,8 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
 	max_txq = vport_config->max_q.max_txq;
 	max_rxq = vport_config->max_q.max_rxq;
 
-	size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
+	return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
 	       (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
-	size += page_pool_ethtool_stats_get_count();
-
-	return size;
 }
 
 /**
@@ -876,7 +870,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
 {
 	struct idpf_netdev_priv *np = netdev_priv(netdev);
 	struct idpf_vport_config *vport_config;
-	struct page_pool_stats pp_stats = { };
 	struct idpf_vport *vport;
 	unsigned int total = 0;
 	unsigned int i, j;
@@ -946,32 +939,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
 				idpf_add_empty_queue_stats(&data, qtype);
 			else
 				idpf_add_queue_stats(&data, rxq, qtype);
-
-			/* In splitq mode, don't get page pool stats here since
-			 * the pools are attached to the buffer queues
-			 */
-			if (is_splitq)
-				continue;
-
-			if (rxq)
-				page_pool_get_stats(rxq->pp, &pp_stats);
-		}
-	}
-
-	for (i = 0; i < vport->num_rxq_grp; i++) {
-		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
-			struct idpf_buf_queue *rxbufq =
-				&vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
-
-			page_pool_get_stats(rxbufq->pp, &pp_stats);
 		}
 	}
 
 	for (; total < vport_config->max_q.max_rxq; total++)
 		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
 
-	page_pool_ethtool_stats_get(data, &pp_stats);
-
 	rcu_read_unlock();
 
 	idpf_vport_ctrl_unlock(netdev);