forked from luck/tmp_suning_uos_patched
30a910d7d3
We'll continue reading its details from tracefs as we need it, but preallocate the whole thing otherwise we may realloc and end up with pointers to the previous buffer. I.e. in an upcoming algorithm we'll look for syscalls that have function signatures that are similar to a given syscall to see if we can reuse its BPF augmenter, so we may be at syscall 42, having a 'struct syscall' pointing to that slot in trace->syscalls.table[] and try to read the slot for a yet unread syscall, which would realloc that table to read the info for syscall 43, say, which would trigger a realloc of trace->syscalls.table[], and then the pointer we had for syscall 42 would be pointing to the previous block of memory. b00m. Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Brendan Gregg <brendan.d.gregg@gmail.com> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Luis Cláudio Gonçalves <lclaudio@redhat.com> Cc: Namhyung Kim <namhyung@kernel.org> Link: https://lkml.kernel.org/n/tip-m3cjzzifibs13imafhkk77a0@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
26 lines
679 B
C
26 lines
679 B
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_SYSCALLTBL_H
#define __PERF_SYSCALLTBL_H

/*
 * Syscall number <-> name mapping table.
 *
 * The anonymous union reflects two alternative backends: either only the
 * libaudit machine type is recorded (audit_machine), or an in-tree
 * generated table is used (syscalls), whose entries are kept opaque here
 * (void *) and cast to the concrete entry type by the implementation.
 * NOTE(review): which union member is active appears to depend on
 * build-time configuration -- confirm in syscalltbl.c.
 */
struct syscalltbl {
	union {
		int audit_machine;	/* libaudit machine id */
		struct {
			int max_id;	/* highest syscall id seen */
			int nr_entries;	/* number of valid entries */
			void *entries;	/* opaque table; layout in syscalltbl.c */
		} syscalls;
	};
};
/* Allocate and initialize a syscall table; NULL on failure (presumably -- confirm in syscalltbl.c). */
struct syscalltbl *syscalltbl__new(void);
/* Release a table returned by syscalltbl__new(); caller owns tbl. */
void syscalltbl__delete(struct syscalltbl *tbl);

/* Map a syscall id to its name; NOTE(review): likely NULL for unknown ids -- verify. */
const char *syscalltbl__name(const struct syscalltbl *tbl, int id);
/* Map a syscall name to its id; NOTE(review): likely negative when not found -- verify. */
int syscalltbl__id(struct syscalltbl *tbl, const char *name);

/*
 * Glob-match iteration over syscall names: _first starts a scan, _next
 * resumes it; *idx carries the iterator position between calls.
 */
int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx);
int syscalltbl__strglobmatch_next(struct syscalltbl *tbl, const char *syscall_glob, int *idx);

#endif /* __PERF_SYSCALLTBL_H */