forked from luck/tmp_suning_uos_patched
32ff77e8cc
Problem and motivation: Once a breakpoint perf event (PERF_TYPE_BREAKPOINT) is created, there is no flexibility to change the breakpoint type (bp_type), breakpoint address (bp_addr), or breakpoint length (bp_len). The only option is to close the perf event and configure a new breakpoint event. This inflexibility has a significant performance overhead. For example, sampling-based, lightweight performance profilers (and also concurrency bug detection tools) monitor different addresses for a short duration using PERF_TYPE_BREAKPOINT and change the address (bp_addr) to another address or change the kind of breakpoint (bp_type) from "write" to a "read" or vice-versa or change the length (bp_len) of the address being monitored. The cost of these modifications is prohibitive since it involves unmapping the circular buffer associated with the perf event, closing the perf event, opening another perf event and mmapping another circular buffer. Solution: The new ioctl flag for perf events, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, introduced in this patch takes a pointer to a struct perf_event_attr as an argument to update an old breakpoint event with new address, type, and size. This facility allows retaining a previously mmapped perf event's ring buffer and avoids having to close and reopen another perf event. This patch supports only changing PERF_TYPE_BREAKPOINT event type; future implementations can extend this feature. The patch replicates some of the functionality of modify_user_hw_breakpoint() in kernel/events/hw_breakpoint.c. modify_user_hw_breakpoint cannot be called directly since perf_event_ctx_lock() is already held in _perf_ioctl(). Evidence: Experiments show that the baseline (not able to modify an already created breakpoint) costs an order of magnitude (~10x) more than the suggested optimization (having the ability to dynamically modify a configured breakpoint via ioctl). 
When the breakpoints typically do not trap, the speedup due to the suggested optimization is ~10x; even when the breakpoints always trap, the speedup is ~4x due to the suggested optimization. Testing: tests posted at https://github.com/linux-contrib/perf_event_modify_bp demonstrate the performance significance of this patch. Tests also check the functional correctness of the patch. Signed-off-by: Milind Chabbi <chabbi.milind@gmail.com> [ Using modify_user_hw_breakpoint_check function. ] [ Reformatted PERF_EVENT_IOC_*, so the values are all in one column. ] Signed-off-by: Jiri Olsa <jolsa@kernel.org> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: David Ahern <dsahern@gmail.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Hari Bathini <hbathini@linux.vnet.ibm.com> Cc: Jin Yao <yao.jin@linux.intel.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Kan Liang <kan.liang@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Oleg Nesterov <onestero@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Will Deacon <will.deacon@arm.com> Link: http://lkml.kernel.org/r/20180312134548.31532-8-jolsa@kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
136 lines
4.1 KiB
C
136 lines
4.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _LINUX_HW_BREAKPOINT_H
|
|
#define _LINUX_HW_BREAKPOINT_H
|
|
|
|
#include <linux/perf_event.h>
|
|
#include <uapi/linux/hw_breakpoint.h>
|
|
|
|
#ifdef CONFIG_HAVE_HW_BREAKPOINT
|
|
|
|
extern int __init init_hw_breakpoint(void);
|
|
|
|
static inline void hw_breakpoint_init(struct perf_event_attr *attr)
|
|
{
|
|
memset(attr, 0, sizeof(*attr));
|
|
|
|
attr->type = PERF_TYPE_BREAKPOINT;
|
|
attr->size = sizeof(*attr);
|
|
/*
|
|
* As it's for in-kernel or ptrace use, we want it to be pinned
|
|
* and to call its callback every hits.
|
|
*/
|
|
attr->pinned = 1;
|
|
attr->sample_period = 1;
|
|
}
|
|
|
|
static inline void ptrace_breakpoint_init(struct perf_event_attr *attr)
|
|
{
|
|
hw_breakpoint_init(attr);
|
|
attr->exclude_kernel = 1;
|
|
}
|
|
|
|
static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
|
|
{
|
|
return bp->attr.bp_addr;
|
|
}
|
|
|
|
static inline int hw_breakpoint_type(struct perf_event *bp)
|
|
{
|
|
return bp->attr.bp_type;
|
|
}
|
|
|
|
static inline unsigned long hw_breakpoint_len(struct perf_event *bp)
|
|
{
|
|
return bp->attr.bp_len;
|
|
}
|
|
|
|
extern struct perf_event *
|
|
register_user_hw_breakpoint(struct perf_event_attr *attr,
|
|
perf_overflow_handler_t triggered,
|
|
void *context,
|
|
struct task_struct *tsk);
|
|
|
|
/* FIXME: only change from the attr, and don't unregister */
|
|
extern int
|
|
modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
|
|
extern int
|
|
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
|
|
bool check);
|
|
|
|
/*
|
|
* Kernel breakpoints are not associated with any particular thread.
|
|
*/
|
|
extern struct perf_event *
|
|
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
|
|
perf_overflow_handler_t triggered,
|
|
void *context,
|
|
int cpu);
|
|
|
|
extern struct perf_event * __percpu *
|
|
register_wide_hw_breakpoint(struct perf_event_attr *attr,
|
|
perf_overflow_handler_t triggered,
|
|
void *context);
|
|
|
|
extern int register_perf_hw_breakpoint(struct perf_event *bp);
|
|
extern int __register_perf_hw_breakpoint(struct perf_event *bp);
|
|
extern void unregister_hw_breakpoint(struct perf_event *bp);
|
|
extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events);
|
|
|
|
extern int dbg_reserve_bp_slot(struct perf_event *bp);
|
|
extern int dbg_release_bp_slot(struct perf_event *bp);
|
|
extern int reserve_bp_slot(struct perf_event *bp);
|
|
extern void release_bp_slot(struct perf_event *bp);
|
|
|
|
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
|
|
|
|
static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
|
|
{
|
|
return &bp->hw.info;
|
|
}
|
|
|
|
#else /* !CONFIG_HAVE_HW_BREAKPOINT */
|
|
|
|
/* No breakpoint hardware support: initialization trivially succeeds. */
static inline int __init init_hw_breakpoint(void)
{
	return 0;
}
|
|
|
|
static inline struct perf_event *
|
|
register_user_hw_breakpoint(struct perf_event_attr *attr,
|
|
perf_overflow_handler_t triggered,
|
|
void *context,
|
|
struct task_struct *tsk) { return NULL; }
|
|
/* Stub: modifying a breakpoint is not implemented without HW support. */
static inline int
modify_user_hw_breakpoint(struct perf_event *bp,
			  struct perf_event_attr *attr)
{
	return -ENOSYS;
}
|
|
/* Stub: validated breakpoint modification is likewise unsupported. */
static inline int
modify_user_hw_breakpoint_check(struct perf_event *bp,
				struct perf_event_attr *attr,
				bool check)
{
	return -ENOSYS;
}
|
|
|
|
static inline struct perf_event *
|
|
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
|
|
perf_overflow_handler_t triggered,
|
|
void *context,
|
|
int cpu) { return NULL; }
|
|
/* Stub: no per-CPU breakpoint array can be created. */
static inline struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	return NULL;
}
|
|
/* Stub: cannot attach a perf event to breakpoint hardware. */
static inline int register_perf_hw_breakpoint(struct perf_event *bp)
{
	return -ENOSYS;
}
|
|
/* Stub: low-level perf breakpoint registration is unsupported as well. */
static inline int __register_perf_hw_breakpoint(struct perf_event *bp)
{
	return -ENOSYS;
}
|
|
/* Stub: nothing was registered, so there is nothing to tear down. */
static inline void unregister_hw_breakpoint(struct perf_event *bp)
{
}
|
|
static inline void
|
|
unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { }
|
|
/* Stub: no debug-register slots exist to reserve. */
static inline int reserve_bp_slot(struct perf_event *bp)
{
	return -ENOSYS;
}
|
|
/* Stub: slot accounting is absent, so releasing is a no-op. */
static inline void release_bp_slot(struct perf_event *bp)
{
}
|
|
|
|
/* Stub: no ptrace breakpoints to flush when a task exits. */
static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
}
|
|
|
|
static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
|
|
#endif /* _LINUX_HW_BREAKPOINT_H */
|