perf/core: Simplify perf_event_groups_for_each()
The last argument is, and always must be, the same.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Dmitri Prokhorov <Dmitry.Prohorov@intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valery Cherepennikov <valery.cherepennikov@intel.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6668128a9e
commit 6e6804d2fa
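For illustration, the effect at a call site looks like the following minimal sketch; the surrounding function and the 'ctx' pointer are placeholders, not part of this commit:

	/* Before: callers had to name the rb-node member, and it was
	 * always 'group_node'. */
	perf_event_groups_for_each(event, &ctx->pinned_groups, group_node) {
		/* ... visit each event ... */
	}

	/* After: the member is fixed inside the macro, so the redundant
	 * argument disappears. */
	perf_event_groups_for_each(event, &ctx->pinned_groups) {
		/* ... visit each event ... */
	}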
@@ -1642,11 +1642,11 @@ perf_event_groups_rotate(struct perf_event_groups *groups, int cpu)
 /*
  * Iterate through the whole groups tree.
  */
-#define perf_event_groups_for_each(event, groups, node)	\
-	for (event = rb_entry_safe(rb_first(&((groups)->tree)),\
-				typeof(*event), node); event;	\
-		event = rb_entry_safe(rb_next(&event->node),	\
-				typeof(*event), node))
+#define perf_event_groups_for_each(event, groups)		\
+	for (event = rb_entry_safe(rb_first(&((groups)->tree)),\
+				typeof(*event), group_node); event;	\
+		event = rb_entry_safe(rb_next(&event->group_node),	\
+				typeof(*event), group_node))
 
 /*
  * Add a event from the lists for its context.
@@ -11345,7 +11345,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 	 * We dont have to disable NMIs - we are only looking at
 	 * the list, not manipulating it:
 	 */
-	perf_event_groups_for_each(event, &parent_ctx->pinned_groups, group_node) {
+	perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
@@ -11361,7 +11361,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 	parent_ctx->rotate_disable = 1;
 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 
-	perf_event_groups_for_each(event, &parent_ctx->flexible_groups, group_node) {
+	perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
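For reference, a hand expansion of the two-argument macro at a call site; this is only a sketch that assumes the kernel's rb_first()/rb_next()/rb_entry_safe() helpers from <linux/rbtree.h>, with 'groups' standing in for a struct perf_event_groups pointer:

	/* Walk every perf_event linked into the groups rbtree via its
	 * 'group_node' member. rb_entry_safe() is a NULL-safe
	 * container_of(): it returns NULL when the tree is exhausted,
	 * which terminates the loop. */
	struct perf_event *event;

	for (event = rb_entry_safe(rb_first(&groups->tree),
				   struct perf_event, group_node);
	     event;
	     event = rb_entry_safe(rb_next(&event->group_node),
				   struct perf_event, group_node)) {
		/* loop body: one iteration per event, in tree order */
	}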