@@ -2451,6 +2451,7 @@ static const struct net_device_ops idpf_netdev_ops_splitq = {
.ndo_set_features = idpf_set_features,
.ndo_tx_timeout = idpf_tx_timeout,
.ndo_bpf = idpf_xdp,
+ .ndo_xdp_xmit = idpf_xdp_xmit,
};
static const struct net_device_ops idpf_netdev_ops_singleq = {
@@ -313,6 +313,35 @@ bool __idpf_xdp_run_prog(struct xdp_buff *xdp, struct libie_xdp_tx_bulk *bq)
return libie_xdp_run_prog(xdp, bq, idpf_xdp_tx_flush_bulk);
}
+/**
+ * idpf_xdp_xmit - submit packets to xdp ring for transmission
+ * @dev: netdev
+ * @n: number of xdp frames to be transmitted
+ * @frames: xdp frames to be transmitted
+ * @flags: transmit flags
+ *
+ * Return: number of frames successfully sent or a negative errno on error.
+ * Frames that fail to transmit are freed via the XDP return API; when a
+ * negative errno is returned, no frames were transmitted and the caller
+ * must handle freeing them.
+ */
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct idpf_netdev_priv *np = netdev_priv(dev);
+ struct idpf_vport *vport = np->vport;
+
+ if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
+ return -ENETDOWN;
+ if (unlikely(!idpf_xdp_is_prog_ena(vport)))
+ return -ENXIO;
+
+ return libie_xdp_xmit_do_bulk(dev, n, frames, flags,
+ &vport->txqs[vport->xdp_txq_offset],
+ vport->num_xdp_txq, idpf_xdp_tx_prep,
+ idpf_xdp_tx_xmit, idpf_xdp_tx_finalize);
+}
+
/**
* idpf_xdp_reconfig_queues - reconfigure queues after the XDP setup
* @vport: vport to load or unload XDP for
@@ -410,6 +439,11 @@ idpf_xdp_setup_prog(struct idpf_vport *vport, struct bpf_prog *prog,
return err;
}
+ if (prog)
+ xdp_features_set_redirect_target(vport->netdev, false);
+ else
+ xdp_features_clear_redirect_target(vport->netdev);
+
if (vport_is_up) {
err = idpf_vport_open(vport, false);
if (err) {
@@ -31,6 +31,8 @@ static inline void idpf_xdp_finalize_rx(struct libie_xdp_tx_bulk *bq)
__idpf_xdp_finalize_rx(bq);
}
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
int idpf_xdp(struct net_device *netdev, struct netdev_bpf *xdp);
#endif /* _IDPF_XDP_H_ */
Use libie XDP infra to implement .ndo_xdp_xmit() in idpf. The Tx
callbacks are reused from XDP_TX code. The XDP redirect target feature
is set/cleared depending on the XDP prog presence, as for now we still
don't allocate XDP Tx queues when there's no program.

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 drivers/net/ethernet/intel/idpf/idpf_lib.c |  1 +
 drivers/net/ethernet/intel/idpf/idpf_xdp.c | 34 ++++++++++++++++++++++
 drivers/net/ethernet/intel/idpf/idpf_xdp.h |  2 ++
 3 files changed, 37 insertions(+)