perf: Protect perf_guest_cbs with RCU
commit ff083a2d972f56bebfd82409ca62e5dfce950961 upstream.
Protect perf_guest_cbs with RCU to fix multiple possible errors. Luckily,
all paths that read perf_guest_cbs already require RCU protection, e.g. to
protect the callback chains, so only the direct perf_guest_cbs touchpoints
need to be modified.
Bug #1 is a simple lack of WRITE_ONCE/READ_ONCE behavior to ensure
perf_guest_cbs isn't reloaded between a !NULL check and a dereference.
Fixed via the READ_ONCE() in rcu_dereference().
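As a minimal illustration of the hazard and of the fixed pattern (a sketch, not
verbatim kernel code; the example_* function names are made up, while
perf_get_guest_cbs() is the helper this patch adds):

  /* Buggy: the compiler is free to load the global perf_guest_cbs twice. */
  unsigned long example_guest_ip_buggy(void)
  {
          if (perf_guest_cbs && perf_guest_cbs->is_in_guest())  /* load #1 */
                  return perf_guest_cbs->get_guest_ip();        /* load #2: reload may be NULL */
          return 0;
  }

  /* Fixed: a single READ_ONCE(), via the rcu_dereference() in perf_get_guest_cbs(). */
  unsigned long example_guest_ip_fixed(void)
  {
          struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();

          if (guest_cbs && guest_cbs->is_in_guest())
                  return guest_cbs->get_guest_ip();
          return 0;
  }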
Bug #2 is that on weakly-ordered architectures, updates to the callbacks
themselves are not guaranteed to be visible before the pointer is made
visible to readers. Fixed by the smp_store_release() in
rcu_assign_pointer() when the new pointer is non-NULL.
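A sketch of the publication ordering (illustrative only; my_cbs and the callback
initializers are placeholders, not code from this patch):

  static struct perf_guest_info_callbacks my_cbs;   /* hypothetical provider */

  void example_publish(void)
  {
          my_cbs.is_in_guest  = my_is_in_guest;      /* plain stores to the struct */
          my_cbs.get_guest_ip = my_get_guest_ip;

          /*
           * Buggy: a plain "perf_guest_cbs = &my_cbs;" lets a weakly-ordered CPU
           * observe the pointer before the stores above.  Fixed:
           * rcu_assign_pointer() implies smp_store_release() for a non-NULL
           * pointer, so the initialization is visible before the publication.
           */
          rcu_assign_pointer(perf_guest_cbs, &my_cbs);
  }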
Bug #3 is that, because the callbacks are global, it's possible for
readers to run in parallel with an unregister, and thus a module
implementing the callbacks can be unloaded while readers are in flight,
resulting in a use-after-free. Fixed by a synchronize_rcu() call when
unregistering callbacks.
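The unregister side after the fix then looks roughly like this (mirroring the
change to perf_unregister_guest_info_callbacks() in the diff below):

  int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
  {
          if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
                  return -EINVAL;

          rcu_assign_pointer(perf_guest_cbs, NULL);

          /* Wait for in-flight readers (e.g. NMI handlers) to finish before
           * the module providing the callbacks can be unloaded. */
          synchronize_rcu();
          return 0;
  }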
Bug #1 escaped notice because it's extremely unlikely a compiler will
reload perf_guest_cbs in this sequence. perf_guest_cbs does get reloaded
for future derefs, e.g. for ->is_user_mode(), but the ->is_in_guest()
guard all but guarantees the consumer will win the race, e.g. to nullify
perf_guest_cbs, KVM has to completely exit the guest and tear down
all VMs before KVM starts its module unload / unregister sequence. This
also makes it all but impossible to encounter bug #3.
Bug #2 has not been a problem because all architectures that register
callbacks are strongly ordered and/or have a static set of callbacks.
But with help, unloading kvm_intel can trigger bug #1, e.g. wrapping
perf_guest_cbs with READ_ONCE in perf_misc_flags() while spamming
kvm_intel module load/unload leads to:
BUG: kernel NULL pointer dereference, address: 0000000000000000
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
PGD 0 P4D 0
Oops: 0000 [#1] PREEMPT SMP
CPU: 6 PID: 1825 Comm: stress Not tainted 5.14.0-rc2+ #459
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:perf_misc_flags+0x1c/0x70
Call Trace:
perf_prepare_sample+0x53/0x6b0
perf_event_output_forward+0x67/0x160
__perf_event_overflow+0x52/0xf0
handle_pmi_common+0x207/0x300
intel_pmu_handle_irq+0xcf/0x410
perf_event_nmi_handler+0x28/0x50
nmi_handle+0xc7/0x260
default_do_nmi+0x6b/0x170
exc_nmi+0x103/0x130
asm_exc_nmi+0x76/0xbf
Fixes: 39447b386c ("perf: Enhance perf to allow for guest statistic collection from host")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20211111020738.2512932-2-seanjc@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
@@ -62,9 +62,10 @@ user_backtrace(struct frame_tail __user *tail,
 void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         struct frame_tail __user *tail;
 
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+        if (guest_cbs && guest_cbs->is_in_guest()) {
                 /* We don't support guest os callchain now */
                 return;
         }
@@ -98,9 +99,10 @@ callchain_trace(struct stackframe *fr,
 void
 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         struct stackframe fr;
 
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+        if (guest_cbs && guest_cbs->is_in_guest()) {
                 /* We don't support guest os callchain now */
                 return;
         }
@@ -111,18 +113,21 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-                return perf_guest_cbs->get_guest_ip();
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+        if (guest_cbs && guest_cbs->is_in_guest())
+                return guest_cbs->get_guest_ip();
 
         return instruction_pointer(regs);
 }
 
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         int misc = 0;
 
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-                if (perf_guest_cbs->is_user_mode())
+        if (guest_cbs && guest_cbs->is_in_guest()) {
+                if (guest_cbs->is_user_mode())
                         misc |= PERF_RECORD_MISC_GUEST_USER;
                 else
                         misc |= PERF_RECORD_MISC_GUEST_KERNEL;
@@ -102,7 +102,9 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+        if (guest_cbs && guest_cbs->is_in_guest()) {
                 /* We don't support guest os callchain now */
                 return;
         }
@@ -147,9 +149,10 @@ static bool callchain_trace(void *data, unsigned long pc)
 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         struct stackframe frame;
 
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+        if (guest_cbs && guest_cbs->is_in_guest()) {
                 /* We don't support guest os callchain now */
                 return;
         }
@@ -160,18 +163,21 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-                return perf_guest_cbs->get_guest_ip();
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+        if (guest_cbs && guest_cbs->is_in_guest())
+                return guest_cbs->get_guest_ip();
 
         return instruction_pointer(regs);
 }
 
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         int misc = 0;
 
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-                if (perf_guest_cbs->is_user_mode())
+        if (guest_cbs && guest_cbs->is_in_guest()) {
+                if (guest_cbs->is_user_mode())
                         misc |= PERF_RECORD_MISC_GUEST_USER;
                 else
                         misc |= PERF_RECORD_MISC_GUEST_KERNEL;
@@ -86,10 +86,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         unsigned long fp = 0;
 
         /* C-SKY does not support virtualization. */
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+        if (guest_cbs && guest_cbs->is_in_guest())
                 return;
 
         fp = regs->regs[4];
@@ -110,10 +111,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         struct stackframe fr;
 
         /* C-SKY does not support virtualization. */
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+        if (guest_cbs && guest_cbs->is_in_guest()) {
                 pr_warn("C-SKY does not support perf in guest mode!");
                 return;
         }
@@ -1363,6 +1363,7 @@ void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                     struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         unsigned long fp = 0;
         unsigned long gp = 0;
         unsigned long lp = 0;
@@ -1371,7 +1372,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 
         leaf_fp = 0;
 
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+        if (guest_cbs && guest_cbs->is_in_guest()) {
                 /* We don't support guest os callchain now */
                 return;
         }
@@ -1479,9 +1480,10 @@ void
 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                       struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         struct stackframe fr;
 
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+        if (guest_cbs && guest_cbs->is_in_guest()) {
                 /* We don't support guest os callchain now */
                 return;
         }
@@ -1493,20 +1495,23 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
         /* However, NDS32 does not support virtualization */
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-                return perf_guest_cbs->get_guest_ip();
+        if (guest_cbs && guest_cbs->is_in_guest())
+                return guest_cbs->get_guest_ip();
 
         return instruction_pointer(regs);
 }
 
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         int misc = 0;
 
         /* However, NDS32 does not support virtualization */
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-                if (perf_guest_cbs->is_user_mode())
+        if (guest_cbs && guest_cbs->is_in_guest()) {
+                if (guest_cbs->is_user_mode())
                         misc |= PERF_RECORD_MISC_GUEST_USER;
                 else
                         misc |= PERF_RECORD_MISC_GUEST_KERNEL;
@@ -60,10 +60,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         unsigned long fp = 0;
 
         /* RISC-V does not support perf in guest mode. */
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+        if (guest_cbs && guest_cbs->is_in_guest())
                 return;
 
         fp = regs->s0;
@@ -84,8 +85,10 @@ void notrace walk_stackframe(struct task_struct *task,
 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
         /* RISC-V does not support perf in guest mode. */
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+        if (guest_cbs && guest_cbs->is_in_guest()) {
                 pr_warn("RISC-V does not support perf in guest mode!");
                 return;
         }
@@ -2545,10 +2545,11 @@ static bool perf_hw_regs(struct pt_regs *regs)
 void
 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         struct unwind_state state;
         unsigned long addr;
 
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+        if (guest_cbs && guest_cbs->is_in_guest()) {
                 /* TODO: We don't support guest os callchain now */
                 return;
         }
@@ -2648,10 +2649,11 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
 void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         struct stack_frame frame;
         const struct stack_frame __user *fp;
 
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+        if (guest_cbs && guest_cbs->is_in_guest()) {
                 /* TODO: We don't support guest os callchain now */
                 return;
         }
@@ -2728,18 +2730,21 @@ static unsigned long code_segment_base(struct pt_regs *regs)
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-                return perf_guest_cbs->get_guest_ip();
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+        if (guest_cbs && guest_cbs->is_in_guest())
+                return guest_cbs->get_guest_ip();
 
         return regs->ip + code_segment_base(regs);
 }
 
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
+        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
         int misc = 0;
 
-        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-                if (perf_guest_cbs->is_user_mode())
+        if (guest_cbs && guest_cbs->is_in_guest()) {
+                if (guest_cbs->is_user_mode())
                         misc |= PERF_RECORD_MISC_GUEST_USER;
                 else
                         misc |= PERF_RECORD_MISC_GUEST_KERNEL;
@@ -2586,6 +2586,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 {
         struct perf_sample_data data;
         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+        struct perf_guest_info_callbacks *guest_cbs;
         int bit;
         int handled = 0;
 
@@ -2651,9 +2652,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
          */
         if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
                 handled++;
-                if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
-                             perf_guest_cbs->handle_intel_pt_intr))
-                        perf_guest_cbs->handle_intel_pt_intr();
+
+                guest_cbs = perf_get_guest_cbs();
+                if (unlikely(guest_cbs && guest_cbs->is_in_guest() &&
+                             guest_cbs->handle_intel_pt_intr))
+                        guest_cbs->handle_intel_pt_intr();
                 else
                         intel_pt_interrupt();
         }
@@ -1235,7 +1235,18 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
                                  enum perf_bpf_event_type type,
                                  u16 flags);
 
-extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
+{
+        /*
+         * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading
+         * the callbacks between a !NULL check and dereferences, to ensure
+         * pending stores/changes to the callback pointers are visible before a
+         * non-NULL perf_guest_cbs is visible to readers, and to prevent a
+         * module from unloading callbacks while readers are active.
+         */
+        return rcu_dereference(perf_guest_cbs);
+}
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 
@@ -6395,18 +6395,25 @@ static void perf_pending_event(struct irq_work *entry)
  * Later on, we might change it to a list if there is
  * another virtualization implementation supporting the callbacks.
  */
-struct perf_guest_info_callbacks *perf_guest_cbs;
+struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
 
 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
 {
-        perf_guest_cbs = cbs;
+        if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
+                return -EBUSY;
+
+        rcu_assign_pointer(perf_guest_cbs, cbs);
         return 0;
 }
 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
 
 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
 {
-        perf_guest_cbs = NULL;
+        if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
+                return -EINVAL;
+
+        rcu_assign_pointer(perf_guest_cbs, NULL);
+        synchronize_rcu();
         return 0;
 }
 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
 
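For context, a minimal sketch of how a callbacks provider is expected to use the
reworked registration API (illustrative only; the kvm_* names and the empty
callback bodies are placeholders, not code from this patch):

  static int kvm_example_is_in_guest(void)             { return 0; }
  static int kvm_example_is_user_mode(void)            { return 0; }
  static unsigned long kvm_example_get_guest_ip(void)  { return 0; }

  static struct perf_guest_info_callbacks kvm_guest_cbs = {
          .is_in_guest  = kvm_example_is_in_guest,
          .is_user_mode = kvm_example_is_user_mode,
          .get_guest_ip = kvm_example_get_guest_ip,
  };

  static int __init example_init(void)
  {
          /* Now fails with -EBUSY if callbacks are already registered. */
          return perf_register_guest_info_callbacks(&kvm_guest_cbs);
  }

  static void __exit example_exit(void)
  {
          /*
           * Clears perf_guest_cbs and, via synchronize_rcu(), waits for
           * in-flight readers before this module's code can go away.
           */
          perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
  }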