forked from luck/tmp_suning_uos_patched
perf/core: Provide a kernel-internal interface to recalibrate event period
Currently, perf_event_period() is used by user tools via ioctl. Following the existing naming convention, export perf_event_period() for kernel users (such as KVM) that may need to recalibrate the event period for their assigned counters according to their requirements. Since perf_event_period() is an external accessor, just like perf_event_{en,dis}able(), it should use perf_event_ctx_lock(). Suggested-by: Kan Liang <kan.liang@linux.intel.com> Signed-off-by: Like Xu <like.xu@linux.intel.com> Acked-by: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
02d496cfb8
commit
3ca270fc9e
|
@ -1336,6 +1336,7 @@ extern void perf_event_disable_local(struct perf_event *event);
|
||||||
extern void perf_event_disable_inatomic(struct perf_event *event);
|
extern void perf_event_disable_inatomic(struct perf_event *event);
|
||||||
extern void perf_event_task_tick(void);
|
extern void perf_event_task_tick(void);
|
||||||
extern int perf_event_account_interrupt(struct perf_event *event);
|
extern int perf_event_account_interrupt(struct perf_event *event);
|
||||||
|
extern int perf_event_period(struct perf_event *event, u64 value);
|
||||||
#else /* !CONFIG_PERF_EVENTS: */
|
#else /* !CONFIG_PERF_EVENTS: */
|
||||||
static inline void *
|
static inline void *
|
||||||
perf_aux_output_begin(struct perf_output_handle *handle,
|
perf_aux_output_begin(struct perf_output_handle *handle,
|
||||||
|
@ -1415,6 +1416,10 @@ static inline void perf_event_disable(struct perf_event *event) { }
|
||||||
static inline int __perf_event_disable(void *info) { return -1; }
|
static inline int __perf_event_disable(void *info) { return -1; }
|
||||||
static inline void perf_event_task_tick(void) { }
|
static inline void perf_event_task_tick(void) { }
|
||||||
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
|
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
|
||||||
|
/*
 * !CONFIG_PERF_EVENTS stub: without perf support there is no event whose
 * period could be updated, so report the request as invalid.
 */
static inline int perf_event_period(struct perf_event *event, u64 value)
{
	return -EINVAL;
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
|
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
|
||||||
|
|
|
@ -5106,16 +5106,11 @@ static int perf_event_check_period(struct perf_event *event, u64 value)
|
||||||
return event->pmu->check_period(event, value);
|
return event->pmu->check_period(event, value);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int perf_event_period(struct perf_event *event, u64 __user *arg)
|
static int _perf_event_period(struct perf_event *event, u64 value)
|
||||||
{
|
{
|
||||||
u64 value;
|
|
||||||
|
|
||||||
if (!is_sampling_event(event))
|
if (!is_sampling_event(event))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
if (copy_from_user(&value, arg, sizeof(value)))
|
|
||||||
return -EFAULT;
|
|
||||||
|
|
||||||
if (!value)
|
if (!value)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
@ -5133,6 +5128,19 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
 * perf_event_period - update the sample period of an event
 * @event: the perf event to adjust
 * @value: the new period (must be non-zero for sampling events)
 *
 * Kernel-internal accessor for in-kernel users such as KVM; takes the
 * event context lock around the update, mirroring the locking pattern of
 * perf_event_{en,dis}able().
 *
 * Returns 0 on success or a negative errno from _perf_event_period().
 */
int perf_event_period(struct perf_event *event, u64 value)
{
	struct perf_event_context *ctx = perf_event_ctx_lock(event);
	int ret = _perf_event_period(event, value);

	perf_event_ctx_unlock(event, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(perf_event_period);
|
||||||
|
|
||||||
static const struct file_operations perf_fops;
|
static const struct file_operations perf_fops;
|
||||||
|
|
||||||
static inline int perf_fget_light(int fd, struct fd *p)
|
static inline int perf_fget_light(int fd, struct fd *p)
|
||||||
|
@ -5176,8 +5184,14 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
|
||||||
return _perf_event_refresh(event, arg);
|
return _perf_event_refresh(event, arg);
|
||||||
|
|
||||||
case PERF_EVENT_IOC_PERIOD:
|
case PERF_EVENT_IOC_PERIOD:
|
||||||
return perf_event_period(event, (u64 __user *)arg);
|
{
|
||||||
|
u64 value;
|
||||||
|
|
||||||
|
if (copy_from_user(&value, (u64 __user *)arg, sizeof(value)))
|
||||||
|
return -EFAULT;
|
||||||
|
|
||||||
|
return _perf_event_period(event, value);
|
||||||
|
}
|
||||||
case PERF_EVENT_IOC_ID:
|
case PERF_EVENT_IOC_ID:
|
||||||
{
|
{
|
||||||
u64 id = primary_event_id(event);
|
u64 id = primary_event_id(event);
|
||||||
|
|
Loading…
Reference in New Issue
Block a user