libperf: Adopt perf_mmap__read_done() from tools/perf
Move perf_mmap__read_done() from tools/perf to libperf and export it in
the perf/mmap.h header.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20191007125344.14268-12-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 7c4d41824f
commit 32fdc2ca7e
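For context (not part of the patch): with perf_mmap__read_init(), perf_mmap__consume() and now perf_mmap__read_done() exported, a libperf consumer can drive one full read pass over a ring buffer. A minimal sketch, assuming an already mmap'ed struct perf_mmap; drain_one_mmap() is a hypothetical helper, and perf_mmap__read_event() is adopted by libperf later in this series:

#include <perf/mmap.h>		/* perf_mmap__read_init/consume/read_done */
#include <perf/event.h>		/* union perf_event */

/* Hypothetical helper, not from this patch: one read pass over a ring. */
static void drain_one_mmap(struct perf_mmap *map)
{
	union perf_event *event;

	if (perf_mmap__read_init(map) < 0)
		return;				/* nothing new in the ring */

	while ((event = perf_mmap__read_event(map)) != NULL) {
		/* ... process the event here ... */
		perf_mmap__consume(map);	/* release it to the kernel */
	}

	perf_mmap__read_done(map);		/* close this read pass */
}

read_init() and read_done() bracket the pass; every call site updated below follows this shape.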
@@ -142,7 +142,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
 next_event:
 			perf_mmap__consume(&md->core);
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 
 	if (!comm1_time || !comm2_time)
@@ -794,7 +794,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
 			break;
 	}
 
-	perf_mmap__read_done(md);
+	perf_mmap__read_done(&md->core);
 	return n;
 }
 
@@ -894,7 +894,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 		}
 	}
 
-	perf_mmap__read_done(md);
+	perf_mmap__read_done(&md->core);
 }
 
 static void perf_top__mmap_read(struct perf_top *top)
@@ -3821,7 +3821,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 				draining = true;
 			}
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 
 	if (trace->nr_events == before) {
@@ -8,5 +8,6 @@ struct perf_mmap;
 
 LIBPERF_API void perf_mmap__consume(struct perf_mmap *map);
 LIBPERF_API int perf_mmap__read_init(struct perf_mmap *map);
+LIBPERF_API void perf_mmap__read_done(struct perf_mmap *map);
 
 #endif /* __LIBPERF_MMAP_H */
@@ -42,6 +42,7 @@ LIBPERF_0.0.1 {
 		perf_evlist__poll;
 		perf_mmap__consume;
 		perf_mmap__read_init;
+		perf_mmap__read_done;
 	local:
 		*;
 };
@@ -175,3 +175,20 @@ int perf_mmap__read_init(struct perf_mmap *map)
 
 	return __perf_mmap__read_init(map);
 }
+
+/*
+ * Mandatory for overwrite mode
+ * The direction of overwrite mode is backward.
+ * The last perf_mmap__read() will set tail to map->core.prev.
+ * Need to correct the map->core.prev to head which is the end of next read.
+ */
+void perf_mmap__read_done(struct perf_mmap *map)
+{
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!refcount_read(&map->refcnt))
+		return;
+
+	map->prev = perf_mmap__read_head(map);
+}
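An aside on the comment being moved (illustration only, not from the patch): in overwrite mode the kernel keeps writing forward while the reader walks backward from head, stopping at the head recorded by the previous pass. A toy model with plain integers standing in for the ring-buffer offsets, under those assumptions:

#include <assert.h>

/*
 * Toy model of the overwrite-mode bookkeeping (assumed values, not
 * libperf internals).  A read pass walks backward from "head" and must
 * stop at "prev", the head recorded at the end of the previous pass;
 * everything older was already delivered.
 */
int main(void)
{
	unsigned long long prev = 4096;	/* head saved after the last pass */
	unsigned long long head = 8192;	/* kernel has written more since */

	/* This pass covers (prev, head]: read backward from 8192 down
	 * to, but not including, 4096. */

	/* What perf_mmap__read_done() does: remember where this pass
	 * ended, so the next pass stops here instead of re-reading. */
	prev = head;

	assert(prev == 8192);
	return 0;
}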
@@ -54,7 +54,7 @@ static int count_samples(struct evlist *evlist, int *sample_count,
 				return TEST_FAIL;
 			}
 		}
-		perf_mmap__read_done(map);
+		perf_mmap__read_done(&map->core);
 	}
 	return TEST_OK;
 }
@@ -194,7 +194,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
 			if (type == PERF_RECORD_SAMPLE)
 				count ++;
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 
 	if (count != expect) {
@@ -435,7 +435,7 @@ static int process_events(struct machine *machine, struct evlist *evlist,
 			if (ret < 0)
 				return ret;
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 	return 0;
 }
@@ -49,7 +49,7 @@ static int find_comm(struct evlist *evlist, const char *comm)
 				found += 1;
 			perf_mmap__consume(&md->core);
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 	return found;
 }
@@ -142,7 +142,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
 		nr_events[evsel->idx]++;
 		perf_mmap__consume(&md->core);
 	}
-	perf_mmap__read_done(md);
+	perf_mmap__read_done(&md->core);
 
 out_init:
 	err = 0;
@@ -124,7 +124,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 
 			goto out_ok;
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 
 	if (nr_events == before)
@@ -279,7 +279,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 
 			perf_mmap__consume(&md->core);
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 
 	/*
@@ -120,7 +120,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 next_event:
 		perf_mmap__consume(&md->core);
 	}
-	perf_mmap__read_done(md);
+	perf_mmap__read_done(&md->core);
 
 out_init:
 	if ((u64) nr_samples == total_periods) {
@@ -280,7 +280,7 @@ static int process_events(struct evlist *evlist,
 			if (ret < 0)
 				goto out_free_nodes;
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 
 	events_array = calloc(cnt, sizeof(struct event_node));
@@ -127,7 +127,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 
 		perf_mmap__consume(&md->core);
 	}
-	perf_mmap__read_done(md);
+	perf_mmap__read_done(&md->core);
 
 out_init:
 	if (!exited || !nr_exit) {
@@ -1822,7 +1822,7 @@ static void *perf_evlist__poll_thread(void *arg)
 			perf_mmap__consume(&map->core);
 			got_data = true;
 		}
-		perf_mmap__read_done(map);
+		perf_mmap__read_done(&map->core);
 	}
 
 	if (draining && !got_data)
@@ -405,20 +405,3 @@ int perf_mmap__push(struct mmap *md, void *to,
 out:
 	return rc;
 }
-
-/*
- * Mandatory for overwrite mode
- * The direction of overwrite mode is backward.
- * The last perf_mmap__read() will set tail to map->core.prev.
- * Need to correct the map->core.prev to head which is the end of next read.
- */
-void perf_mmap__read_done(struct mmap *map)
-{
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!refcount_read(&map->core.refcnt))
-		return;
-
-	map->core.prev = perf_mmap__read_head(&map->core);
-}
@@ -54,5 +54,4 @@ int perf_mmap__push(struct mmap *md, void *to,
 
 size_t mmap__mmap_len(struct mmap *map);
 
-void perf_mmap__read_done(struct mmap *map);
 #endif /*__PERF_MMAP_H */