perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()

[ Upstream commit 68e3c69803dada336893640110cb87221bb01dcf ]

Yang Jihong reported a race between perf_event_set_output() and
perf_mmap_close():

	CPU1					CPU2

	perf_mmap_close(e2)
	  if (atomic_dec_and_test(&e2->rb->mmap_count)) // 1 -> 0
	    detach_rest = true

						ioctl(e1, IOC_SET_OUTPUT, e2)
						  perf_event_set_output(e1, e2)

	  ...
	  list_for_each_entry_rcu(e, &e2->rb->event_list, rb_entry)
	    ring_buffer_attach(e, NULL);
	    // e1 isn't yet added and
	    // therefore not detached

						    ring_buffer_attach(e1, e2->rb)
						      list_add_rcu(&e1->rb_entry,
								   &e2->rb->event_list)

After this, e1 is attached to an unmapped rb and a subsequent
perf_mmap() will loop forever:

	again:
		mutex_lock(&event->mmap_mutex);
		if (event->rb) {
			...
			if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
				...
				mutex_unlock(&event->mmap_mutex);
				goto again;
			}
		}

Nothing will ever detach e1, because perf_mmap_close()'s list
iteration has already passed it: event->rb stays set while
rb->mmap_count stays zero, so atomic_inc_not_zero() keeps failing.

The loop in perf_mmap_close() holds e2->mmap_mutex, while the attach
in perf_event_set_output() holds e1->mmap_mutex. As such there is no
serialization to avoid this race.

Change perf_event_set_output() to take both e1->mmap_mutex and
e2->mmap_mutex to alleviate that problem. Additionally, have the loop
in perf_mmap() detach the rb directly; this avoids having to wait for
the concurrent perf_mmap_close() to get around to doing it before
progress can be made.
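
For reference, the mutex_lock_double() helper this relies on (moved
up in the diff below) avoids ABBA deadlock by acquiring the two
mutexes in address order, so any two tasks contending on the same
pair take them in the same order. A minimal userspace sketch of the
idiom; lock_double() and unlock_double() are illustrative names, not
kernel API:

	#include <pthread.h>
	#include <stdint.h>

	/*
	 * Illustrative userspace analogue of mutex_lock_double(), not
	 * kernel code: acquire two mutexes in a globally consistent
	 * order (by address). Whichever pair of locks two threads
	 * contend on, both take them in the same order, so an ABBA
	 * deadlock cannot form.
	 */
	static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		if ((uintptr_t)b < (uintptr_t)a) {
			pthread_mutex_t *tmp = a;

			a = b;
			b = tmp;
		}
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	}

	static void unlock_double(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		/* unlock order is irrelevant for deadlock avoidance */
		pthread_mutex_unlock(b);
		pthread_mutex_unlock(a);
	}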

Fixes: 9bb5d40cd9 ("perf: Fix mmap() accounting hole")
Reported-by: Yang Jihong <yangjihong1@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Yang Jihong <yangjihong1@huawei.com>
Link: https://lkml.kernel.org/r/YsQ3jm2GR38SW7uD@worktop.programming.kicks-ass.net
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
diff --git a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6228,10 +6228,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
 			/*
-			 * Raced against perf_mmap_close() through
-			 * perf_event_set_output(). Try again, hope for better
-			 * luck.
+			 * Raced against perf_mmap_close(); remove the
+			 * event and try again.
 			 */
+			ring_buffer_attach(event, NULL);
 			mutex_unlock(&event->mmap_mutex);
 			goto again;
 		}
 
@@ -11587,14 +11587,25 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	goto out;
 }
 
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+	if (b < a)
+		swap(a, b);
+
+	mutex_lock(a);
+	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
 	struct perf_buffer *rb = NULL;
 	int ret = -EINVAL;
 
-	if (!output_event)
+	if (!output_event) {
+		mutex_lock(&event->mmap_mutex);
 		goto set;
+	}
 
 	/* don't allow circular references */
 	if (event == output_event)
@@ -11632,8 +11643,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 	    event->pmu != output_event->pmu)
 		goto out;
 
+	/*
+	 * Hold both mmap_mutex to serialize against perf_mmap_close(). Since
+	 * output_event is already on rb->event_list, and the list iteration
+	 * restarts after every removal, it is guaranteed this new event is
+	 * observed *OR* if output_event is already removed, it's guaranteed we
+	 * observe !rb->mmap_count.
+	 */
+	mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
 set:
-	mutex_lock(&event->mmap_mutex);
 
 	/* Can't redirect output if we've got an active mmap() */
 	if (atomic_read(&event->mmap_count))
 		goto unlock;
@@ -11643,6 +11661,12 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 		rb = ring_buffer_get(output_event);
 		if (!rb)
 			goto unlock;
+
+		/* did we race against perf_mmap_close() */
+		if (!atomic_read(&rb->mmap_count)) {
+			ring_buffer_put(rb);
+			goto unlock;
+		}
 	}
 
 	ring_buffer_attach(event, rb);
@@ -11650,20 +11674,13 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 	ret = 0;
 unlock:
 	mutex_unlock(&event->mmap_mutex);
+	if (output_event)
+		mutex_unlock(&output_event->mmap_mutex);
 
 out:
 	return ret;
 }
 
-static void mutex_lock_double(struct mutex *a, struct mutex *b)
-{
-	if (b < a)
-		swap(a, b);
-
-	mutex_lock(a);
-	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
-}
-
 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
 {
 	bool nmi_safe = false;
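
Both the atomic_inc_not_zero() in perf_mmap() and the new
!atomic_read(&rb->mmap_count) check rely on the same refcounting
pattern: once the count has dropped to zero the buffer is being torn
down, and a late arrival must not revive it. A minimal C11 sketch of
the inc-not-zero pattern; this is a userspace illustration under that
assumption, not the kernel implementation:

	#include <stdatomic.h>
	#include <stdbool.h>

	/*
	 * Illustrative userspace analogue of atomic_inc_not_zero(),
	 * not kernel code: take a reference only while the object is
	 * still live (count > 0). The CAS loop refuses to move the
	 * count off zero, so a dying object is never revived.
	 */
	static bool get_ref_not_zero(atomic_int *count)
	{
		int old = atomic_load(count);

		while (old != 0) {
			if (atomic_compare_exchange_weak(count, &old,
							 old + 1))
				return true;	/* got a reference */
			/* CAS failure reloaded 'old'; retry */
		}
		return false;	/* already zero: teardown in progress */
	}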