bpf: Track contents of read-only maps as scalars
Maps that are read-only both from the BPF program side and the user space side have constant contents, so the verifier can track referenced values precisely and use that knowledge for dead code elimination, branch pruning, etc. This patch teaches the BPF verifier how to do this.

Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20191009201458.2679171-2-andriin@fb.com
commit a23740ec43 (parent e0b68fb186)
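The most common way to end up with such a map is libbpf's handling of `const volatile` globals: they are emitted into the object's .rodata section, which the loader turns into an array map created with BPF_F_RDONLY_PROG and frozen before the program is loaded (that loader behavior is assumed here, it is not part of this patch). A minimal, hypothetical BPF-side sketch of the effect; program and variable names are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Placed in .rodata; read-only for the program and frozen by the loader. */
const volatile int debug_enabled = 0;

SEC("socket")
int prog(struct __sk_buff *skb)
{
	/* With this patch the load of debug_enabled comes back as the known
	 * scalar 0, so the verifier can prune this branch as dead code. */
	if (debug_enabled)
		return 1;
	return 0;
}

char _license[] SEC("license") = "GPL";

Before this patch the same load would have been marked as an unknown scalar and both sides of the branch would have to be verified.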
@@ -2739,6 +2739,41 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
 		reg->smax_value = reg->umax_value;
 }
 
+static bool bpf_map_is_rdonly(const struct bpf_map *map)
+{
+	return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
+}
+
+static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
+{
+	void *ptr;
+	u64 addr;
+	int err;
+
+	err = map->ops->map_direct_value_addr(map, &addr, off);
+	if (err)
+		return err;
+	ptr = (void *)addr + off;
+
+	switch (size) {
+	case sizeof(u8):
+		*val = (u64)*(u8 *)ptr;
+		break;
+	case sizeof(u16):
+		*val = (u64)*(u16 *)ptr;
+		break;
+	case sizeof(u32):
+		*val = (u64)*(u32 *)ptr;
+		break;
+	case sizeof(u64):
+		*val = *(u64 *)ptr;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
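bpf_map_is_rdonly() only fires when both halves of the contract hold: the map was created with BPF_F_RDONLY_PROG, so the program can never write it, and it has since been frozen, so user space can no longer write it either. A minimal user-space sketch of that sequence, assuming the libbpf wrappers bpf_create_map(), bpf_map_update_elem() and bpf_map_freeze():

#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Returns an fd for a single-entry array map that satisfies
 * bpf_map_is_rdonly(): BPF_F_RDONLY_PROG at creation time, then frozen. */
int make_frozen_rdonly_map(void)
{
	__u32 key = 0, value = 42;
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value),
			    1, BPF_F_RDONLY_PROG);
	if (fd < 0)
		return fd;

	/* User space may still populate the map until it is frozen. */
	if (bpf_map_update_elem(fd, &key, &value, BPF_ANY) ||
	    bpf_map_freeze(fd)) {
		close(fd);
		return -1;
	}

	return fd;
}

A program loaded afterwards that reads from this map can have those loads resolved to constants via bpf_map_direct_read() above.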
@@ -2776,9 +2811,27 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		if (err)
 			return err;
 		err = check_map_access(env, regno, off, size, false);
-		if (!err && t == BPF_READ && value_regno >= 0)
-			mark_reg_unknown(env, regs, value_regno);
+		if (!err && t == BPF_READ && value_regno >= 0) {
+			struct bpf_map *map = reg->map_ptr;
+
+			/* if map is read-only, track its contents as scalars */
+			if (tnum_is_const(reg->var_off) &&
+			    bpf_map_is_rdonly(map) &&
+			    map->ops->map_direct_value_addr) {
+				int map_off = off + reg->var_off.value;
+				u64 val = 0;
+
+				err = bpf_map_direct_read(map, map_off, size,
+							  &val);
+				if (err)
+					return err;
+
+				regs[value_regno].type = SCALAR_VALUE;
+				__mark_reg_known(&regs[value_regno], val);
+			} else {
+				mark_reg_unknown(env, regs, value_regno);
+			}
+		}
 	} else if (reg->type == PTR_TO_CTX) {
 		enum bpf_reg_type reg_type = SCALAR_VALUE;
 
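Note that the new path also requires the pointer's offset into the map value to be exactly known (tnum_is_const(reg->var_off)) and the map type to implement map_direct_value_addr (array maps do); otherwise the load falls back to mark_reg_unknown() as before. A hypothetical BPF-side sketch of both cases, with illustrative names:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Illustrative read-only configuration, placed in the frozen .rodata map. */
const volatile __u32 cfg[16] = { [3] = 2 };

SEC("socket")
int prog(struct __sk_buff *skb)
{
	__u32 idx = skb->len & 0xf;	/* bounded, but not a constant */

	/* Constant offset: reg->var_off is const, so the verifier reads the
	 * value via bpf_map_direct_read() and scale becomes a known scalar. */
	__u32 scale = cfg[3];

	/* Variable offset: tnum_is_const() fails, so this load is still
	 * marked unknown, exactly as before the patch. */
	return cfg[idx] * scale;
}

char _license[] SEC("license") = "GPL";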