--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -81,6 +81,7 @@ u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
+bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **addr, u32 *len);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
@@ -165,6 +166,12 @@ static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
return false;
}

+static inline bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem,
+ void **vaddr, u32 *len)
+{
+ return false;
+}
+
static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -60,6 +60,7 @@ struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
return NULL;
}
+EXPORT_SYMBOL(xdp_get_umem_from_qid);

static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -170,22 +170,19 @@ void xsk_umem_consume_tx_done(struct xdp_umem *umem)
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

-bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+static __always_inline bool __xsk_umem_consume_tx(struct xdp_umem *umem,
+ struct xdp_desc *desc)
{
- struct xdp_desc desc;
struct xdp_sock *xs;
rcu_read_lock();
list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
- if (!xskq_peek_desc(xs->tx, &desc))
+ if (!xskq_peek_desc(xs->tx, desc))
continue;
- if (xskq_produce_addr_lazy(umem->cq, desc.addr))
+ if (xskq_produce_addr_lazy(umem->cq, desc->addr))
goto out;
- *dma = xdp_umem_get_dma(umem, desc.addr);
- *len = desc.len;
-
xskq_discard_desc(xs->tx);
rcu_read_unlock();
return true;
@@ -195,8 +192,35 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
rcu_read_unlock();
return false;
}
+
+bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+{
+ struct xdp_desc desc;
+
+ if (!__xsk_umem_consume_tx(umem, &desc))
+ return false;
+
+ *dma = xdp_umem_get_dma(umem, desc.addr);
+ *len = desc.len;
+
+ return true;
+}
EXPORT_SYMBOL(xsk_umem_consume_tx);

+bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **vaddr, u32 *len)
+{
+ struct xdp_desc desc;
+
+ if (!__xsk_umem_consume_tx(umem, &desc))
+ return false;
+
+ *vaddr = xdp_umem_get_data(umem, desc.addr);
+ *len = desc.len;
+
+ return true;
+}
+EXPORT_SYMBOL(xsk_umem_consume_tx_virtual);
+
static int xsk_zc_xmit(struct sock *sk)
{
struct xdp_sock *xs = xdp_sk(sk);
Currently the xsk_umem_consume_tx expects only the physical NICs so the api returns a dma address. This patch introduce the new function to return the virtual address, when XSK is used by a virtual device. Signed-off-by: William Tu <u9012063@gmail.com> --- include/net/xdp_sock.h | 7 +++++++ net/xdp/xdp_umem.c | 1 + net/xdp/xsk.c | 38 +++++++++++++++++++++++++++++++------- 3 files changed, 39 insertions(+), 7 deletions(-)