@@ -3717,6 +3717,8 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
+unsigned int do_xdp_egress(struct net_device *dev, struct xdp_frame **frames,
+ unsigned int count);
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
@@ -321,24 +321,33 @@ static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
struct net_device *dev = bq->dev;
int sent = 0, drops = 0, err = 0;
+ unsigned int count = bq->count;
int i;
- if (unlikely(!bq->count))
+ if (unlikely(!count))
return 0;
- for (i = 0; i < bq->count; i++) {
+ for (i = 0; i < count; i++) {
struct xdp_frame *xdpf = bq->q[i];
prefetch(xdpf);
}
- sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
+ if (static_branch_unlikely(&xdp_egress_needed_key)) {
+ count = do_xdp_egress(dev, bq->q, count);
+ drops += bq->count - count;
+ /* all frames consumed by the xdp program? */
+ if (!count)
+ goto out;
+ }
+
+ sent = dev->netdev_ops->ndo_xdp_xmit(dev, count, bq->q, flags);
if (sent < 0) {
err = sent;
sent = 0;
goto error;
}
- drops = bq->count - sent;
+ drops += count - sent;
out:
bq->count = 0;
@@ -350,7 +359,7 @@ static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
/* If ndo_xdp_xmit fails with an errno, no frames have been
* xmit'ed and it's our responsibility to them free all.
*/
- for (i = 0; i < bq->count; i++) {
+ for (i = 0; i < count; i++) {
struct xdp_frame *xdpf = bq->q[i];
xdp_return_frame_rx_napi(xdpf);
@@ -4673,6 +4673,80 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
+/* Run the egress XDP program attached to @dev on a single frame.
+ *
+ * Rebuilds an xdp_buff from @xdp_frame (the xdp_frame metadata lives in
+ * the headroom of the buffer it describes, immediately before
+ * xdp_frame->data) and runs @xdp_prog on it.  Only XDP_PASS and
+ * XDP_DROP are meaningful verdicts on the egress path; XDP_TX,
+ * XDP_REDIRECT and unknown actions are reported as invalid and
+ * converted to XDP_DROP, XDP_ABORTED is traced and likewise dropped.
+ *
+ * NOTE(review): no other xdp_buff fields (e.g. rxq) are initialized
+ * here — confirm bpf_prog_run_xdp() and update_xdp_frame() touch none
+ * beyond the ones set below.
+ *
+ * Returns XDP_PASS if the frame should still be transmitted, XDP_DROP
+ * if the caller must free it.
+ */
+static u32 __xdp_egress_frame(struct net_device *dev,
+			      struct bpf_prog *xdp_prog,
+			      struct xdp_frame *xdp_frame,
+			      struct xdp_txq_info *txq)
+{
+	struct xdp_buff xdp;
+	u32 act;
+
+	xdp.data_hard_start = xdp_frame->data - xdp_frame->headroom
+			      - sizeof(*xdp_frame);
+	xdp.data = xdp_frame->data;
+	xdp.data_end = xdp.data + xdp_frame->len;
+	xdp.data_meta = xdp.data - xdp_frame->metasize;
+	xdp.txq = txq;
+
+	act = bpf_prog_run_xdp(xdp_prog, &xdp);
+	switch (act) {
+	case XDP_DROP:
+	case XDP_PASS:
+		break;
+	case XDP_TX:
+	case XDP_REDIRECT:
+		/* TX/REDIRECT are not valid verdicts at egress */
+	default:
+		bpf_warn_invalid_xdp_action(act);
+		fallthrough;
+	case XDP_ABORTED:
+		trace_xdp_exception(dev, xdp_prog, act);
+		act = XDP_DROP;
+		break;
+	}
+
+	/* if not dropping frame, readjust pointers in case
+	 * program made changes to the buffer
+	 */
+	if (act != XDP_DROP && unlikely(!update_xdp_frame(&xdp, xdp_frame)))
+		return XDP_DROP;
+
+	return act;
+}
+
+/* Run the egress XDP program of @dev (if any) over @frames.
+ *
+ * Frames the program drops are freed here; the survivors are compacted
+ * to the front of @frames in their original order.
+ *
+ * NOTE(review): rcu_dereference() here presumes the caller runs under
+ * rcu_read_lock() — confirm for every ndo_xdp_xmit call site.
+ *
+ * Returns the number of frames remaining in @frames.
+ */
+unsigned int do_xdp_egress(struct net_device *dev, struct xdp_frame **frames,
+			   unsigned int count)
+{
+	struct xdp_txq_info txq = { .dev = dev };
+	struct bpf_prog *xdp_prog;
+	unsigned int kept = 0;
+	unsigned int idx;
+
+	xdp_prog = rcu_dereference(dev->xdp_egress_prog);
+	if (!xdp_prog)
+		return count;
+
+	for (idx = 0; idx < count; idx++) {
+		struct xdp_frame *frame = frames[idx];
+		u32 act;
+
+		act = __xdp_egress_frame(dev, xdp_prog, frame, &txq);
+		if (act == XDP_DROP) {
+			xdp_return_frame_rx_napi(frame);
+			continue;
+		}
+
+		frames[kept++] = frame;
+	}
+
+	return kept;
+}
+
static int netif_rx_internal(struct sk_buff *skb)
{
int ret;