libperf: Adopt perf_mmap__consume() function from tools/perf
Move perf_mmap__consume() from tools/perf to libperf and export it in the perf/mmap.h header. Also move the needed helpers perf_mmap__write_tail(), perf_mmap__read_head() and perf_mmap__empty().

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20191007125344.14268-10-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 7728fa0cfa
parent 1d40ae4e17
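For context, here is a minimal sketch (not part of the patch) of the caller pattern this change leaves on the tools/perf side: the read loop still goes through the tools/perf helpers declared in util/mmap.h, while the consume step now calls the libperf-exported perf_mmap__consume() on the embedded struct perf_mmap (&md->core). The wrapper name drain_mmap() is illustrative only.

#include <perf/mmap.h>      /* libperf: LIBPERF_API perf_mmap__consume() */
#include "util/mmap.h"      /* tools/perf: struct mmap and its read helpers */

/* Hypothetical helper mirroring the read/consume loops updated in this patch. */
static void drain_mmap(struct mmap *md)
{
        union perf_event *event;

        if (perf_mmap__read_init(md) < 0)
                return;

        while ((event = perf_mmap__read_event(md)) != NULL) {
                /* ... parse or queue the event here ... */

                /* Advance the ring-buffer tail via the libperf symbol. */
                perf_mmap__consume(&md->core);
        }

        perf_mmap__read_done(md);
}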
@@ -9,6 +9,7 @@
 #include <sys/prctl.h>
 #include <perf/cpumap.h>
 #include <perf/evlist.h>
+#include <perf/mmap.h>

 #include "debug.h"
 #include "parse-events.h"

@@ -139,7 +140,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
 comm2_time = sample.time;
 }
 next_event:
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 }
 perf_mmap__read_done(md);
 }
@@ -46,6 +46,7 @@
 #include <semaphore.h>
 #include <signal.h>
 #include <math.h>
+#include <perf/mmap.h>

 static const char *get_filename_for_perf_kvm(void)
 {

@@ -766,7 +767,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
 while ((event = perf_mmap__read_event(md)) != NULL) {
 err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
 if (err) {
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 pr_err("Failed to parse sample\n");
 return -1;
 }

@@ -776,7 +777,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
 * FIXME: Here we can't consume the event, as perf_session__queue_event will
 * point to it, and it'll get possibly overwritten by the kernel.
 */
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);

 if (err) {
 pr_err("Failed to enqueue sample: %d\n", err);
@@ -82,6 +82,7 @@
 #include <linux/err.h>

 #include <linux/ctype.h>
+#include <perf/mmap.h>

 static volatile int done;
 static volatile int resize;

@@ -883,7 +884,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 if (ret)
 break;

-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);

 if (top->qe.rotate) {
 pthread_mutex_lock(&top->qe.mutex);
@@ -77,6 +77,7 @@
 #include <sys/sysmacros.h>

 #include <linux/ctype.h>
+#include <perf/mmap.h>

 #ifndef O_CLOEXEC
 # define O_CLOEXEC 02000000

@@ -3810,7 +3811,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 if (err)
 goto out_disable;

-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);

 if (interrupted)
 goto out_disable;
@@ -172,8 +172,9 @@ install_headers:
 $(call do_install,include/perf/cpumap.h,$(prefix)/include/perf,644); \
 $(call do_install,include/perf/threadmap.h,$(prefix)/include/perf,644); \
 $(call do_install,include/perf/evlist.h,$(prefix)/include/perf,644); \
-$(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644);
-$(call do_install,include/perf/event.h,$(prefix)/include/perf,644);
+$(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644); \
+$(call do_install,include/perf/event.h,$(prefix)/include/perf,644); \
+$(call do_install,include/perf/mmap.h,$(prefix)/include/perf,644);

 install_pkgconfig: $(LIBPERF_PC)
 $(call QUIET_INSTALL, $(LIBPERF_PC)) \
@@ -49,4 +49,6 @@ void perf_mmap__munmap(struct perf_mmap *map);
 void perf_mmap__get(struct perf_mmap *map);
 void perf_mmap__put(struct perf_mmap *map);

+u64 perf_mmap__read_head(struct perf_mmap *map);
+
 #endif /* __LIBPERF_INTERNAL_MMAP_H */
tools/perf/lib/include/perf/mmap.h (new file, 11 lines)
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_MMAP_H
+#define __LIBPERF_MMAP_H
+
+#include <perf/core.h>
+
+struct perf_mmap;
+
+LIBPERF_API void perf_mmap__consume(struct perf_mmap *map);
+
+#endif /* __LIBPERF_MMAP_H */
@@ -40,6 +40,7 @@ LIBPERF_0.0.1 {
 perf_evlist__next;
 perf_evlist__set_maps;
 perf_evlist__poll;
+perf_mmap__consume;
 local:
 *;
 };
@@ -1,5 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <sys/mman.h>
+#include <linux/ring_buffer.h>
+#include <linux/perf_event.h>
+#include <perf/mmap.h>
 #include <internal/mmap.h>
 #include <internal/lib.h>
 #include <linux/kernel.h>

@@ -59,3 +62,32 @@ void perf_mmap__put(struct perf_mmap *map)
 if (refcount_dec_and_test(&map->refcnt))
 perf_mmap__munmap(map);
 }
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
+{
+ring_buffer_write_tail(md->base, tail);
+}
+
+u64 perf_mmap__read_head(struct perf_mmap *map)
+{
+return ring_buffer_read_head(map->base);
+}
+
+static bool perf_mmap__empty(struct perf_mmap *map)
+{
+struct perf_event_mmap_page *pc = map->base;
+
+return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
+}
+
+void perf_mmap__consume(struct perf_mmap *map)
+{
+if (!map->overwrite) {
+u64 old = map->prev;
+
+perf_mmap__write_tail(map, old);
+}
+
+if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
+perf_mmap__put(map);
+}
@@ -13,6 +13,7 @@
 #include "util/mmap.h"
 #include <errno.h>
 #include <linux/string.h>
+#include <perf/mmap.h>

 #define NR_ITERS 111

@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <api/fs/fs.h>
 #include <bpf/bpf.h>
+#include <perf/mmap.h>
 #include "tests.h"
 #include "llvm.h"
 #include "debug.h"
@@ -10,6 +10,7 @@
 #include <sys/param.h>
 #include <perf/cpumap.h>
 #include <perf/evlist.h>
+#include <perf/mmap.h>

 #include "debug.h"
 #include "dso.h"

@@ -430,7 +431,7 @@ static int process_events(struct machine *machine, struct evlist *evlist,

 while ((event = perf_mmap__read_event(md)) != NULL) {
 ret = process_event(machine, evlist, event, state);
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 if (ret < 0)
 return ret;
 }
@@ -5,6 +5,7 @@
 #include <sys/prctl.h>
 #include <perf/cpumap.h>
 #include <perf/evlist.h>
+#include <perf/mmap.h>

 #include "debug.h"
 #include "parse-events.h"

@@ -46,7 +47,7 @@ static int find_comm(struct evlist *evlist, const char *comm)
 (pid_t)event->comm.tid == getpid() &&
 strcmp(event->comm.comm, comm) == 0)
 found += 1;
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 }
 perf_mmap__read_done(md);
 }
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <perf/evlist.h>
+#include <perf/mmap.h>

 /*
 * This test will generate random numbers of calls to some getpid syscalls,

@@ -139,7 +140,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
 goto out_delete_evlist;
 }
 nr_events[evsel->idx]++;
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 }
 perf_mmap__read_done(md);

@@ -13,6 +13,7 @@
 #include "debug.h"
 #include "util/mmap.h"
 #include <errno.h>
+#include <perf/mmap.h>

 #ifndef O_DIRECTORY
 #define O_DIRECTORY 00200000

@@ -103,7 +104,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 ++nr_events;

 if (type != PERF_RECORD_SAMPLE) {
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 continue;
 }

@@ -6,6 +6,7 @@
 #include <pthread.h>

 #include <sched.h>
+#include <perf/mmap.h>
 #include "evlist.h"
 #include "evsel.h"
 #include "debug.h"

@@ -276,7 +277,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 ++errs;
 }

-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 }
 perf_mmap__read_done(md);
 }
@@ -15,6 +15,7 @@
 #include "util/mmap.h"
 #include "util/thread_map.h"
 #include <perf/evlist.h>
+#include <perf/mmap.h>

 #define NR_LOOPS 10000000

@@ -117,7 +118,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 total_periods += sample.period;
 nr_samples++;
 next_event:
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 }
 perf_mmap__read_done(md);

@@ -8,6 +8,7 @@
 #include <linux/zalloc.h>
 #include <perf/cpumap.h>
 #include <perf/evlist.h>
+#include <perf/mmap.h>

 #include "debug.h"
 #include "parse-events.h"

@@ -275,7 +276,7 @@ static int process_events(struct evlist *evlist,
 while ((event = perf_mmap__read_event(md)) != NULL) {
 cnt += 1;
 ret = add_event(evlist, &events, event);
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 if (ret < 0)
 goto out_free_nodes;
 }
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <perf/cpumap.h>
 #include <perf/evlist.h>
+#include <perf/mmap.h>

 static int exited;
 static int nr_exit;

@@ -124,7 +125,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 if (event->header.type == PERF_RECORD_EXIT)
 nr_exit++;

-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 }
 perf_mmap__read_done(md);

@@ -42,6 +42,7 @@
 #include <perf/evlist.h>
 #include <perf/evsel.h>
 #include <perf/cpumap.h>
+#include <perf/mmap.h>

 #include <internal/xyarray.h>

@@ -1818,7 +1819,7 @@ static void *perf_evlist__poll_thread(void *arg)
 else
 pr_warning("cannot locate proper evsel for the side band event\n");

-perf_mmap__consume(map);
+perf_mmap__consume(&map->core);
 got_data = true;
 }
 perf_mmap__read_done(map);
@@ -13,6 +13,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h> // sysconf()
+#include <perf/mmap.h>
 #ifdef HAVE_LIBNUMA_SUPPORT
 #include <numaif.h>
 #endif

@@ -95,7 +96,7 @@ union perf_event *perf_mmap__read_event(struct mmap *map)

 /* non-overwirte doesn't pause the ringbuffer */
 if (!map->core.overwrite)
-map->core.end = perf_mmap__read_head(map);
+map->core.end = perf_mmap__read_head(&map->core);

 event = perf_mmap__read(map, &map->core.start, map->core.end);

@@ -105,25 +106,6 @@ union perf_event *perf_mmap__read_event(struct mmap *map)
 return event;
 }

-static bool perf_mmap__empty(struct mmap *map)
-{
-struct perf_event_mmap_page *pc = map->core.base;
-
-return perf_mmap__read_head(map) == map->core.prev && !pc->aux_size;
-}
-
-void perf_mmap__consume(struct mmap *map)
-{
-if (!map->core.overwrite) {
-u64 old = map->core.prev;
-
-perf_mmap__write_tail(map, old);
-}
-
-if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
-perf_mmap__put(&map->core);
-}
-
 int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
 struct auxtrace_mmap_params *mp __maybe_unused,
 void *userpg __maybe_unused,

@@ -420,7 +402,7 @@ static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
 */
 static int __perf_mmap__read_init(struct mmap *md)
 {
-u64 head = perf_mmap__read_head(md);
+u64 head = perf_mmap__read_head(&md->core);
 u64 old = md->core.prev;
 unsigned char *data = md->core.base + page_size;
 unsigned long size;

@@ -437,7 +419,7 @@ static int __perf_mmap__read_init(struct mmap *md)
 WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

 md->core.prev = head;
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 return -EAGAIN;
 }

@@ -466,7 +448,7 @@ int perf_mmap__read_init(struct mmap *map)
 int perf_mmap__push(struct mmap *md, void *to,
 int push(struct mmap *map, void *to, void *buf, size_t size))
 {
-u64 head = perf_mmap__read_head(md);
+u64 head = perf_mmap__read_head(&md->core);
 unsigned char *data = md->core.base + page_size;
 unsigned long size;
 void *buf;

@@ -499,7 +481,7 @@ int perf_mmap__push(struct mmap *md, void *to,
 }

 md->core.prev = head;
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);
 out:
 return rc;
 }

@@ -518,5 +500,5 @@ void perf_mmap__read_done(struct mmap *map)
 if (!refcount_read(&map->core.refcnt))
 return;

-map->core.prev = perf_mmap__read_head(map);
+map->core.prev = perf_mmap__read_head(&map->core);
 }
@@ -45,18 +45,6 @@ struct mmap_params {
 int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
 void mmap__munmap(struct mmap *map);

-void perf_mmap__consume(struct mmap *map);
-
-static inline u64 perf_mmap__read_head(struct mmap *mm)
-{
-return ring_buffer_read_head(mm->core.base);
-}
-
-static inline void perf_mmap__write_tail(struct mmap *md, u64 tail)
-{
-ring_buffer_write_tail(md->core.base, tail);
-}
-
 union perf_event *perf_mmap__read_forward(struct mmap *map);

 union perf_event *perf_mmap__read_event(struct mmap *map);
@@ -6,6 +6,7 @@
 #include <linux/err.h>
 #include <perf/cpumap.h>
 #include <traceevent/event-parse.h>
+#include <perf/mmap.h>
 #include "evlist.h"
 #include "callchain.h"
 #include "evsel.h"

@@ -1045,7 +1046,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 err = perf_evsel__parse_sample(evsel, event, &pevent->sample);

 /* Consume the even only after we parsed it out. */
-perf_mmap__consume(md);
+perf_mmap__consume(&md->core);

 if (err)
 return PyErr_Format(PyExc_OSError,