ebpf: misc core cleanup
Besides others, move bpf_tail_call_proto to the remaining definitions
of other protos, improve comments a bit (i.e. remove some obvious ones
where the code is already self-documenting, and add objectives for
others), and simplify bpf_prog_array_compatible() a bit.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 3324b584b6
parent 17ca8cbf49
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -26,9 +26,10 @@
 #include <linux/vmalloc.h>
 #include <linux/random.h>
 #include <linux/moduleloader.h>
-#include <asm/unaligned.h>
 #include <linux/bpf.h>
 
+#include <asm/unaligned.h>
+
 /* Registers */
 #define BPF_R0	regs[BPF_REG_0]
 #define BPF_R1	regs[BPF_REG_1]
@@ -62,6 +63,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
 	else if (k >= SKF_LL_OFF)
 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
+
 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
 		return ptr;
 
@@ -176,15 +178,6 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	return 0;
 }
 
-const struct bpf_func_proto bpf_tail_call_proto = {
-	.func		= NULL,
-	.gpl_only	= false,
-	.ret_type	= RET_VOID,
-	.arg1_type	= ARG_PTR_TO_CTX,
-	.arg2_type	= ARG_CONST_MAP_PTR,
-	.arg3_type	= ARG_ANYTHING,
-};
-
 /**
  *	__bpf_prog_run - run eBPF program on a given context
  *	@ctx: is the data we are operating on
@@ -650,36 +643,35 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
 		return 0;
 }
 
-void __weak bpf_int_jit_compile(struct bpf_prog *prog)
-{
-}
-
-bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp)
+bool bpf_prog_array_compatible(struct bpf_array *array,
+			       const struct bpf_prog *fp)
 {
-	if (array->owner_prog_type) {
-		if (array->owner_prog_type != fp->type)
-			return false;
-		if (array->owner_jited != fp->jited)
-			return false;
-	} else {
+	if (!array->owner_prog_type) {
+		/* There's no owner yet where we could check for
+		 * compatibility.
+		 */
 		array->owner_prog_type = fp->type;
 		array->owner_jited = fp->jited;
+
+		return true;
 	}
-	return true;
+
+	return array->owner_prog_type == fp->type &&
+	       array->owner_jited == fp->jited;
 }
 
-static int check_tail_call(const struct bpf_prog *fp)
+static int bpf_check_tail_call(const struct bpf_prog *fp)
 {
 	struct bpf_prog_aux *aux = fp->aux;
 	int i;
 
 	for (i = 0; i < aux->used_map_cnt; i++) {
+		struct bpf_map *map = aux->used_maps[i];
 		struct bpf_array *array;
-		struct bpf_map *map;
 
-		map = aux->used_maps[i];
 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
 			continue;
+
 		array = container_of(map, struct bpf_array, map);
 		if (!bpf_prog_array_compatible(array, fp))
 			return -EINVAL;
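
The rewrite above keeps the original semantics of bpf_prog_array_compatible(): the first program stored into a prog array claims ownership (owner_prog_type 0 is BPF_PROG_TYPE_UNSPEC, i.e. no owner yet), and every later program must match both the owner's type and its JIT state. A minimal user-space model of the new control flow, using hypothetical stand-in structs rather than the kernel's bpf_array/bpf_prog:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's bpf_array/bpf_prog fields. */
struct prog { int type; bool jited; };
struct prog_array { int owner_prog_type; bool owner_jited; };

/* Mirrors the simplified kernel logic: the first program claims the
 * array, every later one must match its type and JIT state.
 */
static bool prog_array_compatible(struct prog_array *array,
				  const struct prog *fp)
{
	if (!array->owner_prog_type) {
		/* No owner yet, nothing to check against. */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

int main(void)
{
	struct prog_array arr = { 0 };
	struct prog a = { .type = 1, .jited = true };
	struct prog b = { .type = 1, .jited = false };

	printf("%d\n", prog_array_compatible(&arr, &a)); /* 1: claims the array */
	printf("%d\n", prog_array_compatible(&arr, &a)); /* 1: type and jited match */
	printf("%d\n", prog_array_compatible(&arr, &b)); /* 0: jited mismatch */
	return 0;
}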
@@ -689,22 +681,25 @@ static int check_tail_call(const struct bpf_prog *fp)
 }
 
 /**
- *	bpf_prog_select_runtime - select execution runtime for BPF program
+ *	bpf_prog_select_runtime - select exec runtime for BPF program
  *	@fp: bpf_prog populated with internal BPF program
  *
- * try to JIT internal BPF program, if JIT is not available select interpreter
- * BPF program will be executed via BPF_PROG_RUN() macro
+ * Try to JIT eBPF program, if JIT is not available, use interpreter.
+ * The BPF program will be executed via BPF_PROG_RUN() macro.
  */
 int bpf_prog_select_runtime(struct bpf_prog *fp)
 {
 	fp->bpf_func = (void *) __bpf_prog_run;
 
-	/* Probe if internal BPF can be JITed */
 	bpf_int_jit_compile(fp);
-	/* Lock whole bpf_prog as read-only */
 	bpf_prog_lock_ro(fp);
 
-	return check_tail_call(fp);
+	/* The tail call compatibility check can only be done at
+	 * this late stage as we need to determine, if we deal
+	 * with JITed or non JITed program concatenations and not
+	 * all eBPF JITs might immediately support all features.
+	 */
+	return bpf_check_tail_call(fp);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
 
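
The comment added here is the point of the reordering: whether two programs may be concatenated via tail call depends on whether caller and callee are both JITed or both interpreted, and that is only known once bpf_int_jit_compile() has run. For context, a sketch of the kind of program that populates such a prog array, written in restricted C with present-day libbpf conventions that postdate this commit (jmp_table and dispatcher are illustrative names, not from this patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Program array for tail calls; bpf_prog_array_compatible() is what
 * keeps the programs stored here mutually compatible.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("socket")
int dispatcher(struct __sk_buff *skb)
{
	/* Jump to the program at index 0; on success this never
	 * returns, so falling through means the slot was empty.
	 */
	bpf_tail_call(skb, &jmp_table, 0);

	return 0;
}

char _license[] SEC("license") = "GPL";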
@@ -736,6 +731,21 @@ const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
 
+/* Always built-in helper functions. */
+const struct bpf_func_proto bpf_tail_call_proto = {
+	.func		= NULL,
+	.gpl_only	= false,
+	.ret_type	= RET_VOID,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+};
+
+/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
+void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+{
+}
+
 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
  */
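
Two details of the relocated definitions are worth noting. bpf_tail_call_proto keeps .func = NULL on purpose: the interpreter and the eBPF JITs emit the tail call inline, so the proto exists only so the verifier can type-check the helper's three arguments. And the __weak stub is the usual kernel pattern for optional per-arch hooks: an architecture with an eBPF JIT supplies a strong bpf_int_jit_compile() that replaces the empty fallback at link time. A standalone sketch of those linker semantics (hypothetical jit_compile(), GCC/Clang on ELF assumed):

#include <stdio.h>

/* Weak fallback: used only if no strong definition exists in the
 * final link, just like the kernel's empty JIT stub above.
 */
__attribute__((weak)) void jit_compile(void)
{
	printf("no JIT on this arch, staying with the interpreter\n");
}

/* A strong definition (normally in another object file, e.g. an
 * arch's JIT) would silently replace the weak one:
 *
 * void jit_compile(void)
 * {
 *	printf("arch JIT invoked\n");
 * }
 */

int main(void)
{
	jit_compile();
	return 0;
}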
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -45,11 +45,11 @@ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 
 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
-	.func = bpf_map_lookup_elem,
-	.gpl_only = false,
-	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
-	.arg1_type = ARG_CONST_MAP_PTR,
-	.arg2_type = ARG_PTR_TO_MAP_KEY,
+	.func		= bpf_map_lookup_elem,
+	.gpl_only	= false,
+	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 };
 
 static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
@@ -64,13 +64,13 @@ static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 
 const struct bpf_func_proto bpf_map_update_elem_proto = {
-	.func = bpf_map_update_elem,
-	.gpl_only = false,
-	.ret_type = RET_INTEGER,
-	.arg1_type = ARG_CONST_MAP_PTR,
-	.arg2_type = ARG_PTR_TO_MAP_KEY,
-	.arg3_type = ARG_PTR_TO_MAP_VALUE,
-	.arg4_type = ARG_ANYTHING,
+	.func		= bpf_map_update_elem,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_MAP_KEY,
+	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
+	.arg4_type	= ARG_ANYTHING,
 };
 
 static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
@@ -84,11 +84,11 @@ static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 
 const struct bpf_func_proto bpf_map_delete_elem_proto = {
-	.func = bpf_map_delete_elem,
-	.gpl_only = false,
-	.ret_type = RET_INTEGER,
-	.arg1_type = ARG_CONST_MAP_PTR,
-	.arg2_type = ARG_PTR_TO_MAP_KEY,
+	.func		= bpf_map_delete_elem,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 };
 
 static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
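
These protos only describe the map helpers to the verifier; for instance, RET_PTR_TO_MAP_VALUE_OR_NULL is what forces a program to NULL-check a lookup result before dereferencing it. A usage sketch in restricted C, again in present-day libbpf style with illustrative names (counters, count_packets):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("socket")
int count_packets(struct __sk_buff *skb)
{
	__u32 key = 0;
	__u64 one = 1, *val;

	/* RET_PTR_TO_MAP_VALUE_OR_NULL: the verifier rejects any
	 * dereference that is not guarded by this NULL check.
	 */
	val = bpf_map_lookup_elem(&counters, &key);
	if (val)
		__sync_fetch_and_add(val, 1);
	else
		bpf_map_update_elem(&counters, &key, &one, BPF_ANY);

	return 0;
}

char _license[] SEC("license") = "GPL";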