diff --git a/drivers/net/veth.c b/drivers/net/veth.c
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -17,6 +17,7 @@
#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
+#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
@@ -254,6 +255,45 @@ static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
return skb;
}
+static int veth_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct net_device *rcv;
+ int i, drops = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ rcv = rcu_dereference(priv->peer);
+ if (unlikely(!rcv))
+ return -ENXIO;
+
+ rcv_priv = netdev_priv(rcv);
+ /* Non-NULL xdp_prog ensures that xdp_tx_ring is initialized on the
+ * receive side, i.e. the peer has an XDP program attached and is up.
+ */
+ if (!rcu_access_pointer(rcv_priv->xdp_prog))
+ return -ENXIO;
+
+ spin_lock(&rcv_priv->xdp_tx_ring.producer_lock);
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *frame = frames[i];
+
+ if (unlikely(xdp_ok_fwd_dev(rcv, frame->len) ||
+ __ptr_ring_produce(&rcv_priv->xdp_tx_ring, frame))) {
+ xdp_return_frame_rx_napi(frame);
+ drops++;
+ }
+ }
+ spin_unlock(&rcv_priv->xdp_tx_ring.producer_lock);
+
+ if (flags & XDP_XMIT_FLUSH)
+ __veth_xdp_flush(rcv_priv);
+
+ return n - drops;
+}
+
static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv,
struct xdp_frame *frame)
{
@@ -770,6 +808,7 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = veth_set_rx_headroom,
.ndo_bpf = veth_xdp,
+ .ndo_xdp_xmit = veth_xdp_xmit,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
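
Not part of the patch, but for context: with .ndo_xdp_xmit implemented,
veth_xdp_xmit() is reached when an XDP program on some other device
returns XDP_REDIRECT with the veth as the target. A minimal sketch of
such a program follows; VETH_IFINDEX and the include paths are
assumptions for illustration, not dictated by this patch.

/* Hypothetical redirect program, not part of this patch.  Attached to
 * another device, it forwards every frame to the veth identified by
 * VETH_IFINDEX; the core redirect path then hands the resulting
 * xdp_frames to veth_xdp_xmit() in batches.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define VETH_IFINDEX 5	/* placeholder: ifindex of the veth device */

SEC("xdp")
int xdp_redirect_to_veth(struct xdp_md *ctx)
{
	return bpf_redirect(VETH_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";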
diff --git a/include/linux/filter.h b/include/linux/filter.h
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -19,6 +19,7 @@
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
@@ -786,6 +787,22 @@ static inline bool bpf_dump_raw_ok(void)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
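+/* Check that the forwarding device is up and that pktlen fits its MTU */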
+static __always_inline int
+xdp_ok_fwd_dev(const struct net_device *fwd, unsigned int pktlen)
+{
+ unsigned int len;
+
+ if (unlikely(!(fwd->flags & IFF_UP)))
+ return -ENETDOWN;
+
+ len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+ if (pktlen > len)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
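
A sanity check of the new helper's size limit (commentary, not part of
the patch): for a standard Ethernet target with mtu 1500 and
hard_header_len 14, xdp_ok_fwd_dev() accepts frames up to
1500 + 14 + 4 (VLAN_HLEN) = 1518 bytes, so a full-MTU frame carrying a
single VLAN tag still passes; anything larger is rejected with
-EMSGSIZE.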
diff --git a/net/core/filter.c b/net/core/filter.c
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3216,16 +3216,7 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect);
static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
{
- unsigned int len;
-
- if (unlikely(!(fwd->flags & IFF_UP)))
- return -ENETDOWN;
-
- len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
- if (skb->len > len)
- return -EMSGSIZE;
-
- return 0;
+ return xdp_ok_fwd_dev(fwd, skb->len);
}
static int xdp_do_generic_redirect_map(struct net_device *dev,
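
With this refactor the IFF_UP and MTU checks live in one place:
__xdp_generic_ok_fwd_dev() keeps its skb-based signature for the
generic redirect path, while native ndo_xdp_xmit implementations such
as veth_xdp_xmit() above call the frame-length variant directly.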