@@ -766,11 +766,28 @@ static int veth_xsk_poll(struct napi_struct *napi, int budget)
struct sk_buff *skb;
struct page *page;
void *vaddr;
+ u64 handle;
u32 len;
if (!xsk_umem_consume_tx_virtual(peer_rq->xsk_umem, &vaddr, &len))
break;
+ if (rq->xsk_umem && xsk_umem_peek_addr(rq->xsk_umem, &handle)) {
+ char *daddr;
+ u64 hr = 0;
+
+ /* the peer side also has umem enabled,
+ * copy directly to it.
+ */
+ handle &= rq->xsk_umem->chunk_mask;
+ hr = rq->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ daddr = xdp_umem_get_data(rq->xsk_umem, handle);
+ daddr += hr;
+ memcpy((void *)daddr, vaddr, len);
+ xsk_umem_discard_addr(rq->xsk_umem);
+ vaddr = daddr;
+ }
+
xdpf.data = vaddr + metasize;
xdpf.len = len;
xdpf.headroom = 0;
If the receiving veth side has umem rx enabled, the patch directly copies the packet from the peer side's send buffer into the umem receive buffer. This requires running AF_XDP on both sides of the veth pair. For example: Receiver: # ip netns exec at_ns0 xdpsock -r -N -z -i p0 Sender: # xdpsock -i p1 -t -N -z The performance increases from 1.4Mpps to 2.3Mpps. Signed-off-by: William Tu <u9012063@gmail.com> --- drivers/net/veth.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+)