bpf: fix cgroup bpf release synchronization
Since commit 4bfc0bb2c6 ("bpf: decouple the lifetime of cgroup_bpf from cgroup itself"), cgroup_bpf release occurs asynchronously (from a worker context), and before the release of the cgroup itself.

This introduced a previously non-existing race between the release and update paths, e.g. if a leaf's cgroup_bpf is released while a new bpf program is attached to one of its ancestor cgroups at the same time. The race may result in double-free and other memory corruptions.

To fix the problem, let's protect the body of cgroup_bpf_release() with cgroup_mutex, as it was effectively protected before, when all this code was called from the cgroup release path with the cgroup mutex held.

Also, let's skip cgroups that have no chance to invoke a bpf program on the update path. If the cgroup bpf refcnt has reached 0, the cgroup is offline (no attached processes) and has no associated sockets left, so there is no point in updating its effective progs array. Worse, updating it after the release can lead to a leak. So, let's skip such cgroups.

Big thanks to Tejun Heo for discovering and debugging this problem!

Fixes: 4bfc0bb2c6 ("bpf: decouple the lifetime of cgroup_bpf from cgroup itself")
Reported-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 572a6928f9
commit e5c891a349
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -16,6 +16,8 @@
 #include <linux/bpf-cgroup.h>
 #include <net/sock.h>
 
+#include "../cgroup/cgroup-internal.h"
+
 DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
 EXPORT_SYMBOL(cgroup_bpf_enabled_key);
 
@@ -38,6 +40,8 @@ static void cgroup_bpf_release(struct work_struct *work)
 	struct bpf_prog_array *old_array;
 	unsigned int type;
 
+	mutex_lock(&cgroup_mutex);
+
 	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
 		struct list_head *progs = &cgrp->bpf.progs[type];
 		struct bpf_prog_list *pl, *tmp;
@@ -54,10 +58,12 @@ static void cgroup_bpf_release(struct work_struct *work)
 		}
 		old_array = rcu_dereference_protected(
 				cgrp->bpf.effective[type],
-				percpu_ref_is_dying(&cgrp->bpf.refcnt));
+				lockdep_is_held(&cgroup_mutex));
 		bpf_prog_array_free(old_array);
 	}
 
+	mutex_unlock(&cgroup_mutex);
+
 	percpu_ref_exit(&cgrp->bpf.refcnt);
 	cgroup_put(cgrp);
 }
@@ -229,6 +235,9 @@ static int update_effective_progs(struct cgroup *cgrp,
 	css_for_each_descendant_pre(css, &cgrp->self) {
 		struct cgroup *desc = container_of(css, struct cgroup, self);
 
+		if (percpu_ref_is_zero(&desc->bpf.refcnt))
+			continue;
+
 		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
 		if (err)
 			goto cleanup;
@@ -238,6 +247,14 @@ static int update_effective_progs(struct cgroup *cgrp,
 	css_for_each_descendant_pre(css, &cgrp->self) {
 		struct cgroup *desc = container_of(css, struct cgroup, self);
 
+		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
+			if (unlikely(desc->bpf.inactive)) {
+				bpf_prog_array_free(desc->bpf.inactive);
+				desc->bpf.inactive = NULL;
+			}
+			continue;
+		}
+
 		activate_effective_progs(desc, type, desc->bpf.inactive);
 		desc->bpf.inactive = NULL;
 	}
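For illustration, a minimal userspace C model of the pattern the patch relies on: serialize the asynchronous release against updates with one shared mutex, and have the update path skip nodes whose refcount already hit zero. Everything here (node, tree_mutex, node_release_work, node_update) is a hypothetical stand-in, not kernel code; pthread_mutex plays the role of cgroup_mutex and a plain int the role of the percpu_ref.

/* race_fix_sketch.c — hypothetical userspace model, not kernel code.
 * Build: gcc -pthread race_fix_sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for cgroup_mutex: one lock shared by release and update. */
static pthread_mutex_t tree_mutex = PTHREAD_MUTEX_INITIALIZER;

struct node {
	int refcnt;	/* stand-in for the percpu_ref; 0 == offline */
	int *effective;	/* stand-in for the effective prog array */
};

/* Async release path (the worker): with the lock held, the free cannot
 * interleave with an update of the same pointer. */
static void *node_release_work(void *arg)
{
	struct node *n = arg;

	pthread_mutex_lock(&tree_mutex);
	free(n->effective);
	n->effective = NULL;
	pthread_mutex_unlock(&tree_mutex);
	return NULL;
}

/* Update path: skip nodes whose refcount already hit zero. Recomputing
 * their array is pointless and, if it raced with the release, would
 * leak or double-free it. */
static void *node_update(void *arg)
{
	struct node *n = arg;

	pthread_mutex_lock(&tree_mutex);
	if (n->refcnt == 0) {	/* percpu_ref_is_zero() analogue */
		pthread_mutex_unlock(&tree_mutex);
		return NULL;
	}
	int *fresh = calloc(4, sizeof(int));	/* recompute the array */
	free(n->effective);			/* drop the old array */
	n->effective = fresh;			/* activate the new one */
	pthread_mutex_unlock(&tree_mutex);
	return NULL;
}

int main(void)
{
	struct node n = { .refcnt = 0, .effective = calloc(4, sizeof(int)) };
	pthread_t rel, upd;

	/* The two paths run concurrently in either order; the mutex
	 * serializes them and the refcnt check turns the update into a
	 * no-op on the offline node, so the array is freed exactly once. */
	pthread_create(&rel, NULL, node_release_work, &n);
	pthread_create(&upd, NULL, node_update, &n);
	pthread_join(rel, NULL);
	pthread_join(upd, NULL);

	printf("effective = %p\n", (void *)n.effective);	/* NULL */
	return 0;
}

In the patch itself, the same two ingredients appear as the mutex_lock(&cgroup_mutex) / mutex_unlock(&cgroup_mutex) pair in cgroup_bpf_release() and the percpu_ref_is_zero(&desc->bpf.refcnt) checks in both descendant walks of update_effective_progs().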