bpf: Update locking comment in hashtab code
The comment where the bucket lock is acquired says:

  /* bpf_map_update_elem() can be called in_irq() */

which is not really helpful and, aside from that, does not explain the subtle details of the hash bucket locks, especially in the context of BPF and perf, kprobes and tracing.

Add a comment at the top of the file which explains the protection scopes and the details of how potential deadlocks are prevented.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145642.755793061@linutronix.de
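The recursion protection the message refers to can be illustrated with a small sketch. The code below is not the patched file and not verbatim kernel code: the helper name run_bpf_prog_guarded() is made up for illustration; bpf_prog_active, the per-CPU accessors and BPF_PROG_RUN() are the existing kernel symbols it leans on. The idea is that a BPF program attached to perf, kprobes or tracing only runs if the per-CPU counter was zero on entry, so it can never re-enter a section that already holds a hash bucket lock on the same CPU.

/* Illustrative sketch, not the kernel implementation. */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static bool run_bpf_prog_guarded(const struct bpf_prog *prog, const void *ctx)
{
	bool ran = false;

	preempt_disable();
	if (__this_cpu_inc_return(bpf_prog_active) == 1) {
		/* First activation on this CPU: safe to run the program,
		 * even if it takes hash bucket locks via map helpers.
		 */
		(void)BPF_PROG_RUN(prog, ctx);
		ran = true;
	}
	/* A nested activation (counter already > 0) is skipped, so code
	 * that holds a bucket lock is never re-entered on this CPU.
	 */
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ran;
}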
parent 2ed905c521
commit dbca151cad
@@ -27,6 +27,26 @@
 	.map_delete_batch =			\
 	generic_map_delete_batch

+/*
+ * The bucket lock has two protection scopes:
+ *
+ * 1) Serializing concurrent operations from BPF programs on different
+ *    CPUs
+ *
+ * 2) Serializing concurrent operations from BPF programs and sys_bpf()
+ *
+ * BPF programs can execute in any context including perf, kprobes and
+ * tracing. As there are almost no limits where perf, kprobes and tracing
+ * can be invoked from, the lock operations need to be protected against
+ * deadlocks. Deadlocks can be caused by recursion and by an invocation in
+ * the lock held section when functions which acquire this lock are invoked
+ * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
+ * variable bpf_prog_active, which prevents BPF programs attached to perf
+ * events, kprobes and tracing from being invoked before the prior
+ * invocation from one of these contexts has completed. sys_bpf() uses the
+ * same mechanism by pinning the task to the current CPU and incrementing
+ * the recursion protection across the map operation.
+ */
 struct bucket {
 	struct hlist_nulls_head head;
 	raw_spinlock_t lock;
@@ -884,7 +904,6 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 		 */
 	}

-	/* bpf_map_update_elem() can be called in_irq() */
 	raw_spin_lock_irqsave(&b->lock, flags);

 	l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -964,7 +983,6 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 		return -ENOMEM;
 	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

-	/* bpf_map_update_elem() can be called in_irq() */
 	raw_spin_lock_irqsave(&b->lock, flags);

 	l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1019,7 +1037,6 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;

-	/* bpf_map_update_elem() can be called in_irq() */
 	raw_spin_lock_irqsave(&b->lock, flags);

 	l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1083,7 +1100,6 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 		return -ENOMEM;
 	}

-	/* bpf_map_update_elem() can be called in_irq() */
 	raw_spin_lock_irqsave(&b->lock, flags);

 	l_old = lookup_elem_raw(head, hash, key, key_size);
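The sys_bpf() side that the new comment refers to follows the same pattern: the task is pinned to the current CPU and bpf_prog_active is raised across the map operation, so a tracing program triggered inside the update path is skipped instead of recursing into the bucket lock. The sketch below mirrors that pattern but is not a copy of kernel/bpf/syscall.c; the function name update_elem_from_syscall() is made up, and error handling and map-type special cases are omitted.

/* Illustrative sketch, not the kernel implementation. */
#include <linux/bpf.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

static int update_elem_from_syscall(struct bpf_map *map, void *key,
				    void *value, u64 flags)
{
	int err;

	/* Pin to this CPU and raise the recursion guard so a BPF program
	 * attached to a kprobe or tracepoint hit inside the update path
	 * is skipped instead of recursing into the bucket lock.
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_update_elem(map, key, value, flags);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return err;
}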