perf_events: Move code around to prepare for cgroup

In particular, this patch moves perf_event_exit_task() before
cgroup_exit() to allow for cgroup support. The cgroup_exit()
function detaches the cgroups attached to a task.

Other movements include hoisting some definitions and inlines
to the top of perf_event.c.

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4d22058b.cdace30a.4657.ffff95b1@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 0b3fcf178d
parent 23a2f3ab46
Author: Stephane Eranian, 2011-01-03 18:20:01 +02:00
Committer: Ingo Molnar
2 changed files with 26 additions and 16 deletions

diff --git a/kernel/exit.c b/kernel/exit.c
--- a/kernel/exit.c
+++ b/kernel/exit.c

@@ -994,6 +994,15 @@ NORET_TYPE void do_exit(long code)
 	exit_fs(tsk);
 	check_stack_usage();
 	exit_thread();
+
+	/*
+	 * Flush inherited counters to the parent - before the parent
+	 * gets woken up by child-exit notifications.
+	 *
+	 * because of cgroup mode, must be called before cgroup_exit()
+	 */
+	perf_event_exit_task(tsk);
+
 	cgroup_exit(tsk, 1);
 
 	if (group_dead)
@@ -1007,11 +1016,6 @@ NORET_TYPE void do_exit(long code)
 	 * FIXME: do that only when needed, using sched_exit tracepoint
 	 */
 	flush_ptrace_hw_breakpoint(tsk);
-	/*
-	 * Flush inherited counters to the parent - before the parent
-	 * gets woken up by child-exit notifications.
-	 */
-	perf_event_exit_task(tsk);
 
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
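Why the ordering matters: cgroup_exit() detaches the cgroups attached to the task, so a cgroup-aware perf_event_exit_task() must run while that attachment is still visible. Below is a minimal standalone sketch of the hazard; the structs and function bodies are illustrative stand-ins for the kernel code, not the real implementations.

/*
 * Standalone sketch (not kernel code): models why perf_event_exit_task()
 * has to run before cgroup_exit(). The names mirror the kernel's, but
 * the bodies are illustrative stand-ins.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct cgroup { const char *name; };
struct task_struct { struct cgroup *cgroups; };

/* Stand-in for cgroup_exit(): detaches the cgroups attached to the task. */
static void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
	(void)run_callbacks;
	tsk->cgroups = NULL;
}

/*
 * Stand-in for perf_event_exit_task(): cgroup-aware event teardown
 * still needs to see which cgroup the task was accounted to.
 */
static void perf_event_exit_task(struct task_struct *tsk)
{
	assert(tsk->cgroups != NULL);	/* fails if cgroup_exit() ran first */
	printf("flushing counters for cgroup %s\n", tsk->cgroups->name);
}

int main(void)
{
	struct cgroup cg = { "test" };
	struct task_struct tsk = { &cg };

	perf_event_exit_task(&tsk);	/* correct order, as in this patch */
	cgroup_exit(&tsk, 1);
	return 0;
}

Swapping the two calls in main() trips the assert, modelling the stale-cgroup state this patch guards against.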

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c

@@ -38,6 +38,12 @@
 #include <asm/irq_regs.h>
 
+enum event_type_t {
+	EVENT_FLEXIBLE = 0x1,
+	EVENT_PINNED = 0x2,
+	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -65,6 +71,12 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_event_id;
 
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			      enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type);
+
 void __weak perf_event_print_debug(void) { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -72,6 +84,11 @@ extern __weak const char *perf_pmu_name(void)
 	return "pmu";
 }
 
+static inline u64 perf_clock(void)
+{
+	return local_clock();
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
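perf_clock() itself is only a thin wrapper around local_clock(), the kernel's fast nanosecond-resolution timestamp; hoisting it presumably lets the upcoming cgroup code near the top of the file timestamp events. A rough userspace sketch of the same pattern, with CLOCK_MONOTONIC standing in for local_clock():

/*
 * Standalone sketch (not kernel code): perf_clock() as a thin inline
 * over a nanosecond clock source. CLOCK_MONOTONIC is a stand-in for
 * the kernel's local_clock().
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef uint64_t u64;

static inline u64 perf_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (u64)ts.tv_sec * 1000000000ULL + (u64)ts.tv_nsec;
}

int main(void)
{
	u64 t0 = perf_clock();
	u64 t1 = perf_clock();

	printf("elapsed: %llu ns\n", (unsigned long long)(t1 - t0));
	return 0;
}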
@@ -240,11 +257,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 	put_ctx(ctx);
 }
 
-static inline u64 perf_clock(void)
-{
-	return local_clock();
-}
-
 /*
  * Update the record of the current time in a context.
  */
@@ -1193,12 +1205,6 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
 	return 0;
 }
 
-enum event_type_t {
-	EVENT_FLEXIBLE = 0x1,
-	EVENT_PINNED = 0x2,
-	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 static void ctx_sched_out(struct perf_event_context *ctx,
			   struct perf_cpu_context *cpuctx,
			   enum event_type_t event_type)
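Note that event_type_t is a bitmask: EVENT_ALL is the OR of the pinned and flexible bits, so callers such as ctx_sched_out() can act on either group alone or on both at once. A standalone sketch of how the flags select work (userspace C; sched_out_sketch() is a hypothetical stand-in for the kernel's scheduling helpers):

/*
 * Standalone sketch (not kernel code): how the event_type_t bitmask
 * selects which event groups a scheduling operation touches.
 */
#include <stdio.h>

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/* Hypothetical stand-in for ctx_sched_out(): acts only on the groups
 * selected by event_type. */
static void sched_out_sketch(enum event_type_t event_type)
{
	if (event_type & EVENT_PINNED)
		printf("scheduling out pinned events\n");
	if (event_type & EVENT_FLEXIBLE)
		printf("scheduling out flexible events\n");
}

int main(void)
{
	sched_out_sketch(EVENT_PINNED);	/* pinned group only */
	sched_out_sketch(EVENT_ALL);	/* both groups */
	return 0;
}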