--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -614,12 +614,13 @@ struct xdp_buff;
struct sk_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
+struct bpf_dtab_netdev *__dev_map_idx_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_map *__dev_map_get_default_map(struct net_device *dev);
int dev_map_ensure_default_map(struct net *net);
void dev_map_put_default_map(struct net *net);
int dev_map_inc_redirect_count(void);
void dev_map_dec_redirect_count(void);
-void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
+void __dev_map_insert_ctx(struct bpf_map *map, struct bpf_dtab_netdev *dst);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
@@ -705,6 +706,12 @@ static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
return NULL;
}
+static inline struct net_device *__dev_map_idx_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ return NULL;
+}
+
static inline struct bpf_map *__dev_map_get_default_map(struct net_device *dev)
{
return NULL;
@@ -727,7 +734,8 @@ static inline void dev_map_dec_redirect_count(void)
{
}
-static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index)
+static inline void __dev_map_insert_ctx(struct bpf_map *map,
+ struct bpf_dtab_netdev *dst)
{
}
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -59,6 +59,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_IDX, dev_map_idx_ops)
#if defined(CONFIG_BPF_STREAM_PARSER)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -147,7 +147,8 @@ struct _bpf_dtab_netdev {
#define devmap_ifindex(fwd, map) \
(!fwd ? 0 : \
- ((map->map_type == BPF_MAP_TYPE_DEVMAP) ? \
+ ((map->map_type == BPF_MAP_TYPE_DEVMAP || \
+ map->map_type == BPF_MAP_TYPE_DEVMAP_IDX) ? \
((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0))
#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -132,6 +132,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
+ BPF_MAP_TYPE_DEVMAP_IDX,
};
/* Note that tracing related programs such as
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -46,6 +46,12 @@
* notifier hook walks the map we know that new dev references can not be
* added by the user because core infrastructure ensures dev_get_by_index()
* calls will fail at this point.
+ *
+ * The devmap_idx type is a map type which interprets keys as ifindexes and
+ * indexes these using a hashmap. This allows maps that use ifindex as key to be
+ * densely packed instead of having holes in the lookup array for unused
+ * ifindexes. The setup and packet enqueue/send code is shared between the two
+ * types of devmap; only the lookup and insertion code differs.
*/
#include <linux/bpf.h>
#include <net/xdp.h>
@@ -67,6 +73,8 @@ struct xdp_bulk_queue {
struct bpf_dtab_netdev {
struct net_device *dev; /* must be first member, due to tracepoint */
+ unsigned int ifindex;
+ struct hlist_node index_hlist;
struct bpf_dtab *dtab;
unsigned int bit;
struct xdp_bulk_queue __percpu *bulkq;
@@ -79,12 +87,30 @@ struct bpf_dtab {
unsigned long __percpu *flush_needed;
struct list_head list;
struct rcu_head rcu;
+
+ /* these are only used for DEVMAP_IDX type maps */
+ unsigned long *bits_used;
+ struct hlist_head *dev_index_head;
+ spinlock_t index_lock;
};
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);
static atomic_t global_redirect_use = {};
+static struct hlist_head *dev_map_create_hash(void)
+{
+ int i;
+ struct hlist_head *hash;
+
+ hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
+ if (hash != NULL)
+ for (i = 0; i < NETDEV_HASHENTRIES; i++)
+ INIT_HLIST_HEAD(&hash[i]);
+
+ return hash;
+}
+
static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
@@ -101,6 +127,11 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr,
/* make sure page count doesn't overflow */
cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
cost += dev_map_bitmap_size(attr) * num_possible_cpus();
+
+ if (attr->map_type == BPF_MAP_TYPE_DEVMAP_IDX)
+ cost += dev_map_bitmap_size(attr) +
+ sizeof(struct hlist_head) * NETDEV_HASHENTRIES;
+
if (cost >= U32_MAX - PAGE_SIZE)
return -EINVAL;
@@ -126,8 +157,25 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr,
if (!dtab->netdev_map)
goto err_map;
+ if (attr->map_type == BPF_MAP_TYPE_DEVMAP_IDX) {
+ dtab->bits_used = kzalloc(dev_map_bitmap_size(attr),
+ GFP_KERNEL);
+ if (!dtab->bits_used)
+ goto err_bitmap;
+
+ dtab->dev_index_head = dev_map_create_hash();
+ if (!dtab->dev_index_head)
+ goto err_idx;
+
+ spin_lock_init(&dtab->index_lock);
+ }
+
return 0;
+ err_idx:
+ kfree(dtab->bits_used);
+ err_bitmap:
+ bpf_map_area_free(dtab->netdev_map);
err_map:
free_percpu(dtab->flush_needed);
err_alloc:
@@ -192,6 +240,8 @@ static void __dev_map_free(struct rcu_head *rcu)
kfree(dev);
}
+ kfree(dtab->dev_index_head);
+ kfree(dtab->bits_used);
free_percpu(dtab->flush_needed);
bpf_map_area_free(dtab->netdev_map);
kfree(dtab);
@@ -234,12 +284,76 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
return 0;
}
-void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
+static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
+ int ifindex)
+{
+ return &dtab->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
+}
+
+struct bpf_dtab_netdev *__dev_map_idx_lookup_elem(struct bpf_map *map, u32 key)
+{
+ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ struct hlist_head *head = dev_map_index_hash(dtab, key);
+ struct bpf_dtab_netdev *dev;
+
+ hlist_for_each_entry_rcu(dev, head, index_hlist)
+ if (dev->ifindex == key)
+ return dev;
+
+ return NULL;
+}
+
+static int dev_map_idx_get_next_key(struct bpf_map *map, void *key,
+ void *next_key)
+{
+ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ u32 ifindex, *next = next_key;
+ struct bpf_dtab_netdev *dev, *next_dev;
+ struct hlist_head *head;
+ int i = 0;
+
+ if (!key)
+ goto find_first;
+
+ ifindex = *(u32 *)key;
+
+ dev = __dev_map_idx_lookup_elem(map, ifindex);
+ if (!dev)
+ goto find_first;
+
+ next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
+ struct bpf_dtab_netdev, index_hlist);
+
+ if (next_dev) {
+ *next = next_dev->ifindex;
+ return 0;
+ }
+
+ i = ifindex & (NETDEV_HASHENTRIES - 1);
+ i++;
+
+ find_first:
+ for (; i < NETDEV_HASHENTRIES; i++) {
+ head = dev_map_index_hash(dtab, i);
+
+ next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+ struct bpf_dtab_netdev,
+ index_hlist);
+ if (next_dev) {
+ *next = next_dev->ifindex;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+void __dev_map_insert_ctx(struct bpf_map *map, struct bpf_dtab_netdev *dst)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
- __set_bit(bit, bitmap);
+ __set_bit(dst->bit, bitmap);
}
static int bq_xmit_all(struct bpf_dtab_netdev *obj,
@@ -409,9 +523,16 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
- struct net_device *dev = obj ? obj->dev : NULL;
- return dev ? &dev->ifindex : NULL;
+ return obj ? &obj->ifindex : NULL;
+}
+
+static void *dev_map_idx_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_dtab_netdev *obj = __dev_map_idx_lookup_elem(map,
+ *(u32 *)key);
+
+ return obj ? &obj->ifindex : NULL;
}
static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
@@ -466,6 +587,43 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key)
return 0;
}
+static int dev_map_idx_delete_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ struct bpf_dtab_netdev *old_dev;
+ int k = *(u32 *)key;
+
+ old_dev = __dev_map_idx_lookup_elem(map, k);
+ if (!old_dev)
+ return 0;
+
+ spin_lock(&dtab->index_lock);
+ hlist_del_rcu(&old_dev->index_hlist);
+ spin_unlock(&dtab->index_lock);
+
+ xchg(&dtab->netdev_map[old_dev->bit], NULL);
+ clear_bit_unlock(old_dev->bit, dtab->bits_used);
+ call_rcu(&old_dev->rcu, __dev_map_entry_free);
+ return 0;
+}
+
+static bool __dev_map_find_bit(struct bpf_dtab *dtab, unsigned int *bit)
+{
+ unsigned int b = 0;
+
+ retry:
+ b = find_next_zero_bit(dtab->bits_used, dtab->map.max_entries, b);
+
+ if (b >= dtab->map.max_entries)
+ return false;
+
+ if (test_and_set_bit_lock(b, dtab->bits_used))
+ goto retry;
+
+ *bit = b;
+ return true;
+}
+
static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
struct bpf_dtab *dtab,
u32 ifindex,
@@ -492,6 +650,7 @@ static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
return ERR_PTR(-EINVAL);
}
+ dev->ifindex = dev->dev->ifindex;
dev->bit = bit;
dev->dtab = dtab;
@@ -539,6 +698,49 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
map, key, value, map_flags);
}
+static int __dev_map_idx_update_elem(struct net *net, struct bpf_map *map,
+ void *key, void *value, u64 map_flags)
+{
+ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ struct bpf_dtab_netdev *dev, *old_dev;
+ u32 idx = *(u32 *)key;
+ u32 val = *(u32 *)value;
+ u32 bit;
+
+ if (idx != val)
+ return -EINVAL;
+ if (unlikely(map_flags > BPF_EXIST))
+ return -EINVAL;
+
+ old_dev = __dev_map_idx_lookup_elem(map, idx);
+ if (old_dev) {
+ if (map_flags & BPF_NOEXIST)
+ return -EEXIST;
+ else
+ return 0;
+ }
+
+ if (!__dev_map_find_bit(dtab, &bit))
+ return -E2BIG; /* map is full; matched by the default map resize check */
+ dev = __dev_map_alloc_node(net, dtab, idx, bit);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ xchg(&dtab->netdev_map[bit], dev);
+ spin_lock(&dtab->index_lock);
+ hlist_add_head_rcu(&dev->index_hlist,
+ dev_map_index_hash(dtab, dev->ifindex));
+ spin_unlock(&dtab->index_lock);
+ return 0;
+}
+
+static int dev_map_idx_update_elem(struct bpf_map *map, void *key, void *value,
+ u64 map_flags)
+{
+ return __dev_map_idx_update_elem(current->nsproxy->net_ns,
+ map, key, value, map_flags);
+}
+
const struct bpf_map_ops dev_map_ops = {
.map_alloc = dev_map_alloc,
.map_free = dev_map_free,
@@ -549,6 +751,16 @@ const struct bpf_map_ops dev_map_ops = {
.map_check_btf = map_check_no_btf,
};
+const struct bpf_map_ops dev_map_idx_ops = {
+ .map_alloc = dev_map_alloc,
+ .map_free = dev_map_free,
+ .map_get_next_key = dev_map_idx_get_next_key,
+ .map_lookup_elem = dev_map_idx_lookup_elem,
+ .map_update_elem = dev_map_idx_update_elem,
+ .map_delete_elem = dev_map_idx_delete_elem,
+ .map_check_btf = map_check_no_btf,
+};
+
static inline struct net *bpf_default_map_to_net(struct bpf_dtab_container *cont)
{
struct netns_xdp *xdp = container_of(cont, struct netns_xdp, default_map);
@@ -583,8 +795,8 @@ void dev_map_put_default_map(struct net *net)
static int __init_default_map(struct bpf_dtab_container *cont)
{
struct net *net = bpf_default_map_to_net(cont);
+ int size = DEV_MAP_DEFAULT_SIZE, i = 0;
struct bpf_dtab *dtab, *old_dtab;
- int size = DEV_MAP_DEFAULT_SIZE;
struct net_device *netdev;
union bpf_attr attr = {};
u32 idx;
@@ -596,7 +808,7 @@ static int __init_default_map(struct bpf_dtab_container *cont)
return 0;
for_each_netdev(net, netdev)
- if (netdev->ifindex >= size)
+ if (++i >= size)
size <<= 1;
old_dtab = rcu_dereference(cont->dtab);
@@ -607,7 +819,7 @@ static int __init_default_map(struct bpf_dtab_container *cont)
if (!dtab)
return -ENOMEM;
- attr.map_type = BPF_MAP_TYPE_DEVMAP;
+ attr.map_type = BPF_MAP_TYPE_DEVMAP_IDX;
attr.max_entries = size;
attr.value_size = 4;
attr.key_size = 4;
@@ -620,7 +832,7 @@ static int __init_default_map(struct bpf_dtab_container *cont)
for_each_netdev(net, netdev) {
idx = netdev->ifindex;
- err = __dev_map_update_elem(net, &dtab->map, &idx, &idx, 0);
+ err = __dev_map_idx_update_elem(net, &dtab->map, &idx, &idx, 0);
if (err) {
__dev_map_free(&dtab->rcu);
return err;
@@ -741,8 +953,8 @@ static int dev_map_notification(struct notifier_block *notifier,
rcu_read_lock();
dtab = rcu_dereference(net->xdp.default_map.dtab);
if (dtab) {
- err = __dev_map_update_elem(net, &dtab->map,
- &idx, &idx, 0);
+ err = __dev_map_idx_update_elem(net, &dtab->map,
+ &idx, &idx, 0);
if (err == -E2BIG) {
spin_lock(&dev_map_lock);
err = __init_default_map(&net->xdp.default_map);
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2576,6 +2576,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
* for now.
*/
case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_DEVMAP_IDX:
if (func_id != BPF_FUNC_redirect_map)
goto error;
break;
@@ -2648,6 +2649,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
break;
case BPF_FUNC_redirect_map:
if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
+ map->map_type != BPF_MAP_TYPE_DEVMAP_IDX &&
map->map_type != BPF_MAP_TYPE_CPUMAP &&
map->map_type != BPF_MAP_TYPE_XSKMAP)
goto error;
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3345,13 +3345,14 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
int err;
switch (map->map_type) {
- case BPF_MAP_TYPE_DEVMAP: {
+ case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_DEVMAP_IDX: {
struct bpf_dtab_netdev *dst = fwd;
err = dev_map_enqueue(dst, xdp, dev_rx);
if (unlikely(err))
return err;
- __dev_map_insert_ctx(map, index);
+ __dev_map_insert_ctx(map, dst);
break;
}
case BPF_MAP_TYPE_CPUMAP: {
@@ -3384,6 +3385,7 @@ void xdp_do_flush_map(void)
if (map) {
switch (map->map_type) {
case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_DEVMAP_IDX:
__dev_map_flush(map);
break;
case BPF_MAP_TYPE_CPUMAP:
@@ -3404,6 +3406,8 @@ static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
switch (map->map_type) {
case BPF_MAP_TYPE_DEVMAP:
return __dev_map_lookup_elem(map, index);
+ case BPF_MAP_TYPE_DEVMAP_IDX:
+ return __dev_map_idx_lookup_elem(map, index);
case BPF_MAP_TYPE_CPUMAP:
return __cpu_map_lookup_elem(map, index);
case BPF_MAP_TYPE_XSKMAP:
@@ -3494,7 +3498,8 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
goto err;
}
- if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
+ if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
+ map->map_type == BPF_MAP_TYPE_DEVMAP_IDX) {
struct bpf_dtab_netdev *dst = fwd;
err = dev_map_generic_redirect(dst, skb, xdp_prog);
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -37,6 +37,7 @@ const char * const map_type_name[] = {
[BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
[BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
[BPF_MAP_TYPE_DEVMAP] = "devmap",
+ [BPF_MAP_TYPE_DEVMAP_IDX] = "devmap_idx",
[BPF_MAP_TYPE_SOCKMAP] = "sockmap",
[BPF_MAP_TYPE_CPUMAP] = "cpumap",
[BPF_MAP_TYPE_XSKMAP] = "xskmap",
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -132,6 +132,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
+ BPF_MAP_TYPE_DEVMAP_IDX,
};
/* Note that tracing related programs such as
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -172,6 +172,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_DEVMAP_IDX:
case BPF_MAP_TYPE_SOCKMAP:
case BPF_MAP_TYPE_CPUMAP:
case BPF_MAP_TYPE_XSKMAP:
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -519,6 +519,21 @@ static void test_devmap(unsigned int task, void *data)
close(fd);
}
+static void test_devmap_idx(unsigned int task, void *data)
+{
+ int fd;
+ __u32 key, value;
+
+ fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP_IDX, sizeof(key), sizeof(value),
+ 2, 0);
+ if (fd < 0) {
+ printf("Failed to create devmap_idx '%s'!\n", strerror(errno));
+ exit(1);
+ }
+
+ close(fd);
+}
+
static void test_queuemap(unsigned int task, void *data)
{
const int MAP_SIZE = 32;
@@ -1686,6 +1701,7 @@ static void run_all_tests(void)
test_arraymap_percpu_many_keys();
test_devmap(0, NULL);
+ test_devmap_idx(0, NULL);
test_sockmap(0, NULL);
test_map_large();
A common pattern when using xdp_redirect_map() is to create a device map
where the lookup key is simply ifindex. Because device maps are arrays,
this leaves holes in the map, and the map has to be sized to fit the
largest ifindex, regardless of how many devices are actually needed in
the map.

This patch adds a second type of device map where the key is interpreted
as an ifindex and looked up using a hashmap, instead of being used as an
array index. This leads to maps being densely packed, so they can be
smaller.

The default maps used by xdp_redirect() are changed to use the new map
type, which means that xdp_redirect() is no longer limited to ifindex < 64,
but instead to 64 total simultaneous interfaces per network namespace.
This also provides an easy way to compare the performance of devmap and
devmap_idx:

xdp_redirect_map (devmap):  8394560 pkt/s
xdp_redirect (devmap_idx):  8179480 pkt/s

Difference: 215080 pkt/s or 3.1 nanoseconds per packet.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
---
 include/linux/bpf.h                     |  12 +-
 include/linux/bpf_types.h               |   1 
 include/trace/events/xdp.h              |   3 
 include/uapi/linux/bpf.h                |   1 
 kernel/bpf/devmap.c                     | 232 ++++++++++++++++++++++++++++++-
 kernel/bpf/verifier.c                   |   2 
 net/core/filter.c                       |  11 +
 tools/bpf/bpftool/map.c                 |   1 
 tools/include/uapi/linux/bpf.h          |   1 
 tools/lib/bpf/libbpf_probes.c           |   1 
 tools/testing/selftests/bpf/test_maps.c |  16 ++
 11 files changed, 265 insertions(+), 16 deletions(-)
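As a quick usage illustration (not part of the patch itself): the sketch
below creates a devmap_idx map from user space and inserts one interface
by name, along the lines of the selftest above. It assumes the libbpf
helpers bpf_create_map() and bpf_map_update_elem() from tools/lib/bpf
plus the updated uapi/linux/bpf.h from this series; the interface name
"eth0" is an arbitrary placeholder. Note that __dev_map_idx_update_elem()
rejects updates where key != value, since both must be the ifindex.

/* Hypothetical user-space sketch: create a devmap_idx map and add one
 * interface, keyed (and valued) by its ifindex.
 */
#include <errno.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>

#include <linux/bpf.h>
#include <bpf/bpf.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";
	__u32 idx;
	int fd, err;

	idx = if_nametoindex(ifname);
	if (!idx) {
		fprintf(stderr, "unknown interface '%s'\n", ifname);
		return 1;
	}

	/* key size and value size are both 4 bytes (u32 ifindex) */
	fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP_IDX, sizeof(__u32),
			    sizeof(__u32), 8, 0);
	if (fd < 0) {
		fprintf(stderr, "map create: %s\n", strerror(errno));
		return 1;
	}

	/* devmap_idx requires key == value == ifindex */
	err = bpf_map_update_elem(fd, &idx, &idx, 0);
	if (err)
		fprintf(stderr, "map update: %s\n", strerror(errno));

	return err ? 1 : 0;
}

An XDP program using such a map would then pass the same ifindex as the
key to bpf_redirect_map(), e.g. bpf_redirect_map(&tx_map, ifindex, 0),
instead of a compact array index as with the plain devmap type.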