xsk: Store struct xdp_sock as a flexible array member of the XSKMAP
Prior to this commit, the XDP socket instances were stored in a separately allocated array that the XSKMAP pointed to. Now, we store the sockets as a flexible array member of the XSKMAP, in a similar fashion to the arraymap. Doing so, we do less pointer chasing in the lookup.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Link: https://lore.kernel.org/bpf/20191101110346.15004-2-bjorn.topel@gmail.com
parent 75b0bfd2e1
commit 64fe8c061d
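To illustrate the layout change, here is a minimal userspace C sketch; the old_map/new_map types, new_map_alloc(), and the int entry type are hypothetical stand-ins for the kernel structures, not the kernel code. With a flexible array member, the header and the entry array live in one allocation, so a lookup costs a single pointer dereference instead of two.

#include <stdio.h>
#include <stdlib.h>

struct old_map {
        size_t max_entries;
        int **entries;          /* separate allocation: lookup chases two pointers */
};

struct new_map {
        size_t max_entries;
        int *entries[];         /* flexible array member: one allocation, one deref */
};

static struct new_map *new_map_alloc(size_t max_entries)
{
        /* Header and entry array in a single allocation, mirroring what
         * struct_size(m, xsk_map, max_entries) computes in the patch below. */
        struct new_map *m = calloc(1, sizeof(*m) +
                                   max_entries * sizeof(m->entries[0]));
        if (m)
                m->max_entries = max_entries;
        return m;
}

int main(void)
{
        struct new_map *m = new_map_alloc(4);

        if (!m)
                return 1;
        /* Lookup is m->entries[i]: no second pointer load as with old_map. */
        printf("entry 0 = %p\n", (void *)m->entries[0]);
        free(m);
        return 0;
}

The patch achieves the same single-allocation layout in the kernel with struct_size() and bpf_map_area_alloc().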
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
@@ -11,9 +11,9 @@

 struct xsk_map {
         struct bpf_map map;
-        struct xdp_sock **xsk_map;
         struct list_head __percpu *flush_list;
         spinlock_t lock; /* Synchronize map updates */
+        struct xdp_sock *xsk_map[];
 };

 int xsk_map_inc(struct xsk_map *map)
@@ -80,9 +80,10 @@ static void xsk_map_sock_delete(struct xdp_sock *xs,

 static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 {
+        struct bpf_map_memory mem;
+        int cpu, err, numa_node;
         struct xsk_map *m;
-        int cpu, err;
-        u64 cost;
+        u64 cost, size;

         if (!capable(CAP_NET_ADMIN))
                 return ERR_PTR(-EPERM);
@@ -92,44 +93,35 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
             attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
                 return ERR_PTR(-EINVAL);

-        m = kzalloc(sizeof(*m), GFP_USER);
-        if (!m)
+        numa_node = bpf_map_attr_numa_node(attr);
+        size = struct_size(m, xsk_map, attr->max_entries);
+        cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus());
+
+        err = bpf_map_charge_init(&mem, cost);
+        if (err < 0)
+                return ERR_PTR(err);
+
+        m = bpf_map_area_alloc(size, numa_node);
+        if (!m) {
+                bpf_map_charge_finish(&mem);
                 return ERR_PTR(-ENOMEM);
+        }

         bpf_map_init_from_attr(&m->map, attr);
+        bpf_map_charge_move(&m->map.memory, &mem);
         spin_lock_init(&m->lock);

-        cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
-        cost += sizeof(struct list_head) * num_possible_cpus();
-
-        /* Notice returns -EPERM on if map size is larger than memlock limit */
-        err = bpf_map_charge_init(&m->map.memory, cost);
-        if (err)
-                goto free_m;
-
-        err = -ENOMEM;
-
         m->flush_list = alloc_percpu(struct list_head);
-        if (!m->flush_list)
-                goto free_charge;
+        if (!m->flush_list) {
+                bpf_map_charge_finish(&m->map.memory);
+                bpf_map_area_free(m);
+                return ERR_PTR(-ENOMEM);
+        }

         for_each_possible_cpu(cpu)
                 INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));

-        m->xsk_map = bpf_map_area_alloc(m->map.max_entries *
-                                        sizeof(struct xdp_sock *),
-                                        m->map.numa_node);
-        if (!m->xsk_map)
-                goto free_percpu;
         return &m->map;
-
-free_percpu:
-        free_percpu(m->flush_list);
-free_charge:
-        bpf_map_charge_finish(&m->map.memory);
-free_m:
-        kfree(m);
-        return ERR_PTR(err);
 }

 static void xsk_map_free(struct bpf_map *map)
@@ -139,8 +131,7 @@ static void xsk_map_free(struct bpf_map *map)
         bpf_clear_redirect_map(map);
         synchronize_net();
         free_percpu(m->flush_list);
-        bpf_map_area_free(m->xsk_map);
-        kfree(m);
+        bpf_map_area_free(m);
 }

 static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
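For reference, the sizing in the new xsk_map_alloc() expands as sketched below. This shows the arithmetic only: struct_size() and array_size() (from linux/overflow.h) compute the same values but saturate on overflow, so an oversized map fails the charge or allocation cleanly instead of under-allocating.

/* Equivalent arithmetic, overflow saturation omitted:
 *
 *   size = sizeof(struct xsk_map)
 *        + attr->max_entries * sizeof(struct xdp_sock *);
 *   cost = size
 *        + num_possible_cpus() * sizeof(struct list_head);
 *
 * size is what bpf_map_area_alloc() allocates in one block; cost, which
 * also covers the per-CPU flush lists, is what bpf_map_charge_init()
 * charges against the memlock limit.
 */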