Merge tag 'perf-core-for-mingo-4.15-20171023' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

 - Update vendor events JSON metrics for Intel's Broadwell, Broadwell
   Server, Haswell, Haswell Server, IvyBridge, IvyTown, JakeTown, Sandy
   Bridge, Skylake and Skylake Server (Andi Kleen)

 - Add vendor event file for Intel's Goldmont Plus V1 (Kan Liang)

 - Move perf_mmap methods from 'perf record' and evlist.c to a separate
   mmap.[ch] pair, to better separate things and pave the way for further
   work on multithreading tools (Arnaldo Carvalho de Melo); a sketch of
   the new push-style API follows this list

 - Do not check ABI headers in a detached tarball build, as the kernel
   headers from which we copied tools/include/ are by definition not
   available (Arnaldo Carvalho de Melo)

 - Make 'perf script' use fprintf()-like printing, i.e. receiving a FILE
   pointer, so that it gets consistent with other tools/ code and allows
   for printing to per-event files (Arnaldo Carvalho de Melo); see the
   second sketch after this list

 - Error handling fixes (resource release on exit) for 'perf script'
   and 'perf kmem' (Christophe JAILLET)

 - Make some 'perf event attr' tests optional on virtual machines, where
   tested counters are not available (Jiri Olsa)
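
The mmap.[ch] split mentioned above reshapes ring-buffer reading into a
push-style API: perf_mmap__push() walks the buffer and hands ready chunks
to a callback such as record__pushfn(), both visible in the
builtin-record.c hunks below. A minimal toy sketch of that callback shape
(toy_mmap__push() and print_pushfn() are illustrative stand-ins, not the
real tools/perf declarations):

  #include <stdio.h>
  #include <string.h>

  struct perf_mmap {
          unsigned char *base;  /* ring storage, size is a power of two */
          size_t mask;          /* size - 1                             */
          size_t prev;          /* consumer position (tail)             */
          size_t head;          /* producer position                    */
  };

  typedef int (*push_fn_t)(void *to, void *buf, size_t size);

  /* Hand [prev, head) to fn, splitting once if the range wraps. */
  static int toy_mmap__push(struct perf_mmap *md, void *to, push_fn_t fn)
  {
          size_t start = md->prev, end = md->head, size;

          if ((start & md->mask) + (end - start) > md->mask + 1) {
                  /* Wrapped: push the piece up to the buffer end first. */
                  size = md->mask + 1 - (start & md->mask);
                  if (fn(to, &md->base[start & md->mask], size) < 0)
                          return -1;
                  start += size;
          }
          if (end > start &&
              fn(to, &md->base[start & md->mask], end - start) < 0)
                  return -1;
          md->prev = end;
          return 0;
  }

  /* Same shape as record__pushfn(): 'to' carries the tool's state. */
  static int print_pushfn(void *to, void *buf, size_t size)
  {
          return fwrite(buf, 1, size, to) == size ? 0 : -1;
  }

  int main(void)
  {
          unsigned char ring[8];
          struct perf_mmap md = { .base = ring, .mask = sizeof(ring) - 1,
                                  .prev = 6, .head = 10 };

          memcpy(ring, "ABCDEFGH", 8);
          return toy_mmap__push(&md, stdout, print_pushfn); /* "GHAB" */
  }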

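The fprintf()-like convention means print helpers take an explicit FILE *
and return the number of characters printed, as the reworked
bpf_output__printer() in the builtin-trace.c hunk below does. A standalone
sketch of the convention, assuming a simplified binary_printer_ops enum
rather than the full tools/perf set:

  #include <ctype.h>
  #include <stdio.h>

  /* Simplified stand-in for tools/perf's binary_printer_ops values. */
  enum binary_printer_ops { BINARY_PRINT_CHAR_DATA, BINARY_PRINT_LINE_END };

  /* fprintf()-like: explicit FILE *, returns characters printed. */
  static int char_printer(enum binary_printer_ops op, unsigned int val,
                          void *extra, FILE *fp)
  {
          unsigned char ch = (unsigned char)val;

          (void)extra;
          switch (op) {
          case BINARY_PRINT_CHAR_DATA:
                  return fprintf(fp, "%c", isprint(ch) ? ch : '.');
          case BINARY_PRINT_LINE_END:
                  return fprintf(fp, "\n");
          }
          return 0;
  }

  int main(void)
  {
          const unsigned char raw[] = { 'o', 'k', 0x01 };
          size_t i;
          int printed = 0;

          for (i = 0; i < sizeof(raw); i++)
                  printed += char_printer(BINARY_PRINT_CHAR_DATA, raw[i],
                                          NULL, stdout);
          printed += char_printer(BINARY_PRINT_LINE_END, 0, NULL, stdout);
          return printed == 4 ? 0 : 1;  /* prints "ok.\n" */
  }
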
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed by Ingo Molnar, 2017-10-24 10:53:04 +02:00
commit 9b7c85473c
46 changed files with 3367 additions and 830 deletions

@@ -204,7 +204,7 @@ For example Intel Core CPUs typically have four generic performance counters
for the core, plus three fixed counters for instructions, cycles and
ref-cycles. Some special events have restrictions on which counter they
can schedule, and may not support multiple instances in a single group.
When too many events are specified in the group none of them will not
When too many events are specified in the group some of them will not
be measured.
Globally pinned events can limit the number of counters available for
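
As context for the documentation hunk above, a minimal sketch of how an
event group is built with perf_event_open(2): members pass the leader's
fd, and a group that can never be scheduled on the available counters
simply reads back 0 counts. The event choices here are arbitrary and
error handling is trimmed:

  #include <linux/perf_event.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                 int cpu, int group_fd, unsigned long flags)
  {
          return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd,
                         flags);
  }

  int main(void)
  {
          struct perf_event_attr attr;
          int leader, member;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_CPU_CYCLES;
          attr.disabled = 1;        /* leader starts the whole group */
          attr.exclude_kernel = 1;  /* often needed when unprivileged */

          leader = sys_perf_event_open(&attr, 0, -1, -1, 0);
          if (leader < 0) {
                  perror("leader");
                  return 1;
          }

          /*
           * Members pass the leader's fd; the group is scheduled onto
           * counters as a unit.  If it needs more counters than the PMU
           * has, it is never scheduled and its counts read as 0.
           */
          attr.config = PERF_COUNT_HW_INSTRUCTIONS;
          attr.disabled = 0;
          member = sys_perf_event_open(&attr, 0, -1, leader, 0);
          if (member < 0) {
                  perror("member");
                  return 1;
          }
          return 0;
  }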


@@ -1,3 +1,4 @@
#include <linux/compiler.h>
#include <sys/types.h>
#include <regex.h>
@@ -23,7 +24,7 @@ static struct ins_ops *arm__associate_instruction_ops(struct arch *arch, const c
return ops;
}
static int arm__annotate_init(struct arch *arch)
static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
struct arm_annotate *arm;
int err;


@@ -1,3 +1,4 @@
#include <linux/compiler.h>
#include <sys/types.h>
#include <regex.h>
@@ -25,7 +26,7 @@ static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const
return ops;
}
static int arm64__annotate_init(struct arch *arch)
static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
struct arm64_annotate *arm;
int err;


@@ -1,3 +1,5 @@
#include <linux/compiler.h>
static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, const char *name)
{
int i;
@@ -46,7 +48,7 @@ static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, con
return ops;
}
static int powerpc__annotate_init(struct arch *arch)
static int powerpc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
if (!arch->initialized) {
arch->initialized = true;


@@ -1,3 +1,5 @@
#include <linux/compiler.h>
static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *name)
{
struct ins_ops *ops = NULL;
@@ -19,7 +21,7 @@ static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *na
return ops;
}
static int s390__annotate_init(struct arch *arch)
static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
if (!arch->initialized) {
arch->initialized = true;


@@ -122,3 +122,17 @@ static int x86__cpuid_parse(struct arch *arch, char *cpuid)
return -1;
}
static int x86__annotate_init(struct arch *arch, char *cpuid)
{
int err = 0;
if (arch->initialized)
return 0;
if (cpuid)
err = x86__cpuid_parse(arch, cpuid);
arch->initialized = true;
return err;
}


@@ -1983,7 +1983,8 @@ int cmd_kmem(int argc, const char **argv)
if (perf_time__parse_str(&ptime, time_str) != 0) {
pr_err("Invalid time string\n");
return -EINVAL;
ret = -EINVAL;
goto out_delete;
}
if (!strcmp(argv[0], "stat")) {
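
The hunk above converts an early return into the function's centralized
cleanup path, so resources are released on the error exit. A generic
sketch of that idiom — run() and parse_time() are hypothetical stand-ins
for cmd_kmem() and perf_time__parse_str():

  #include <errno.h>
  #include <stdio.h>
  #include <stdlib.h>

  static int parse_time(const char *s)
  {
          return (s && *s) ? 0 : -1;
  }

  static int run(const char *time_str)
  {
          char *session = malloc(64);   /* stand-in for the perf session */
          int ret = 0;

          if (!session)
                  return -ENOMEM;

          if (parse_time(time_str) != 0) {
                  fprintf(stderr, "Invalid time string\n");
                  ret = -EINVAL;
                  goto out_delete;      /* release, do not leak on error */
          }

          /* ... the real work would go here ... */

  out_delete:
          free(session);
          return ret;
  }

  int main(void)
  {
          return run("") == -EINVAL ? 0 : 1;
  }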


@@ -129,107 +129,12 @@ static int process_synthesized_event(struct perf_tool *tool,
return record__write(rec, event, event->header.size);
}
static int
backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
static int record__pushfn(void *to, void *bf, size_t size)
{
struct perf_event_header *pheader;
u64 evt_head = head;
int size = mask + 1;
pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
pheader = (struct perf_event_header *)(buf + (head & mask));
*start = head;
while (true) {
if (evt_head - head >= (unsigned int)size) {
pr_debug("Finished reading backward ring buffer: rewind\n");
if (evt_head - head > (unsigned int)size)
evt_head -= pheader->size;
*end = evt_head;
return 0;
}
pheader = (struct perf_event_header *)(buf + (evt_head & mask));
if (pheader->size == 0) {
pr_debug("Finished reading backward ring buffer: get start\n");
*end = evt_head;
return 0;
}
evt_head += pheader->size;
pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
}
WARN_ONCE(1, "Shouldn't get here\n");
return -1;
}
static int
rb_find_range(void *data, int mask, u64 head, u64 old,
u64 *start, u64 *end, bool backward)
{
if (!backward) {
*start = old;
*end = head;
return 0;
}
return backward_rb_find_range(data, mask, head, start, end);
}
static int
record__mmap_read(struct record *rec, struct perf_mmap *md,
bool overwrite, bool backward)
{
u64 head = perf_mmap__read_head(md);
u64 old = md->prev;
u64 end = head, start = old;
unsigned char *data = md->base + page_size;
unsigned long size;
void *buf;
int rc = 0;
if (rb_find_range(data, md->mask, head,
old, &start, &end, backward))
return -1;
if (start == end)
return 0;
struct record *rec = to;
rec->samples++;
size = end - start;
if (size > (unsigned long)(md->mask) + 1) {
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
md->prev = head;
perf_mmap__consume(md, overwrite || backward);
return 0;
}
if ((start & md->mask) + size != (end & md->mask)) {
buf = &data[start & md->mask];
size = md->mask + 1 - (start & md->mask);
start += size;
if (record__write(rec, buf, size) < 0) {
rc = -1;
goto out;
}
}
buf = &data[start & md->mask];
size = end - start;
start += size;
if (record__write(rec, buf, size) < 0) {
rc = -1;
goto out;
}
md->prev = head;
perf_mmap__consume(md, overwrite || backward);
out:
return rc;
return record__write(rec, bf, size);
}
static volatile int done;
@@ -576,8 +481,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
if (maps[i].base) {
if (record__mmap_read(rec, &maps[i],
evlist->overwrite, backward) != 0) {
if (perf_mmap__push(&maps[i], evlist->overwrite, backward, rec, record__pushfn) != 0) {
rc = -1;
goto out;
}

File diff suppressed because it is too large.


@@ -1828,16 +1828,14 @@ static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evs
goto out_put;
}
static void bpf_output__printer(enum binary_printer_ops op,
unsigned int val, void *extra)
static int bpf_output__printer(enum binary_printer_ops op,
unsigned int val, void *extra __maybe_unused, FILE *fp)
{
FILE *output = extra;
unsigned char ch = (unsigned char)val;
switch (op) {
case BINARY_PRINT_CHAR_DATA:
fprintf(output, "%c", isprint(ch) ? ch : '.');
break;
return fprintf(fp, "%c", isprint(ch) ? ch : '.');
case BINARY_PRINT_DATA_BEGIN:
case BINARY_PRINT_LINE_BEGIN:
case BINARY_PRINT_ADDR:
@@ -1850,13 +1848,15 @@ static void bpf_output__printer(enum binary_printer_ops op,
default:
break;
}
return 0;
}
static void bpf_output__fprintf(struct trace *trace,
struct perf_sample *sample)
{
print_binary(sample->raw_data, sample->raw_size, 8,
bpf_output__printer, trace->output);
binary__fprintf(sample->raw_data, sample->raw_size, 8,
bpf_output__printer, NULL, trace->output);
}
static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,


@@ -57,6 +57,11 @@ check () {
}
# Check if we have the kernel headers (tools/perf/../../include), else
# we're probably on a detached tarball, so no point in trying to check
# differences.
test -d ../../include || exit 0
# simple diff check
for i in $HEADERS; do
check $i -B


@@ -13,7 +13,7 @@
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization"
},
@@ -25,7 +25,7 @@
},
{
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
@@ -37,7 +37,7 @@
},
{
"BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
@@ -49,19 +49,19 @@
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( cpu@uops_executed.core\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - ( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED ) ) / RS_EVENTS.EMPTY_END",
"MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost"
},
@@ -79,13 +79,13 @@
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES",
"MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
@@ -97,7 +97,7 @@
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 / duration_time",
"MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
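
The added parentheses in the metric hunks above matter because division
is left-associative, so "1 / INST_RETIRED.ANY / cycles" parses as
"(1 / INST_RETIRED.ANY) / cycles" rather than the intended CPI ratio.
A quick check in C, where the operator binds the same way:

  #include <stdio.h>

  int main(void)
  {
          double inst = 8.0e9, cycles = 1.0e10;

          /* Left-associative: equivalent to (1 / inst) / cycles. */
          double wrong = 1.0 / inst / cycles;   /* ~1.25e-20, meaningless */
          double cpi   = 1.0 / (inst / cycles); /* 1.25 cycles/instruction */

          printf("wrong = %g, CPI = %g\n", wrong, cpi);
          return 0;
  }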


@@ -13,7 +13,7 @@
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization"
},
@@ -25,7 +25,7 @@
},
{
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
@@ -37,7 +37,7 @@
},
{
"BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
@@ -49,19 +49,19 @@
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( cpu@uops_executed.core\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - ( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED ) ) / RS_EVENTS.EMPTY_END",
"MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost"
},
@@ -79,13 +79,13 @@
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES",
"MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED) ) / (2*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED) ) / (2*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles))",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
@@ -97,7 +97,7 @@
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 / duration_time",
"MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},

File diff suppressed because it is too large.


@@ -0,0 +1,62 @@
[
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
"EventCode": "0x80",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "ICACHE.HIT",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
"EventCode": "0x80",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "ICACHE.MISSES",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.",
"EventCode": "0x80",
"Counter": "0,1,2,3",
"UMask": "0x3",
"PEBScounters": "0,1,2,3",
"EventName": "ICACHE.ACCESSES",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
"EventCode": "0xE7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "MS_DECODED.MS_ENTRY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "MS decode starts"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
"EventCode": "0xE9",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Decode restrictions due to predicting wrong instruction length"
}
]


@@ -0,0 +1,38 @@
[
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.",
"EventCode": "0x13",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
"SampleAfterValue": "200003",
"BriefDescription": "Load uops that split a page (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts when a memory store of a uop spans a page boundary (a split) is retired.",
"EventCode": "0x13",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
"SampleAfterValue": "200003",
"BriefDescription": "Store uops that split a page (Precise event capable)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "Machine clears due to memory ordering issue"
}
]


@@ -0,0 +1,98 @@
[
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
"EventCode": "0x86",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "FETCH_STALL.ALL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles code-fetch stalled due to any reason."
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
"EventCode": "0x86",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles the code-fetch stalls and an ITLB miss is outstanding."
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Unfilled issue slots per cycle"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Unfilled issue slots per cycle to recover"
},
{
"CollectPEBSRecord": "2",
"PublicDescription": "Counts hardware interrupts received by the processor.",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "HW_INTERRUPTS.RECEIVED",
"PDIR_COUNTER": "na",
"SampleAfterValue": "203",
"BriefDescription": "Hardware interrupts received"
},
{
"CollectPEBSRecord": "2",
"PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "HW_INTERRUPTS.MASKED",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles hardware interrupts are masked"
},
{
"CollectPEBSRecord": "2",
"PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles pending interrupts are masked"
}
]


@@ -0,0 +1,544 @@
[
{
"PEBS": "2",
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
"EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"PEBScounters": "32",
"EventName": "INST_RETIRED.ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired (Fixed event)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
"EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"PEBScounters": "33",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when core is not halted (Fixed event)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
"EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"PEBScounters": "34",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when core is not halted (Fixed event)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"SampleAfterValue": "200003",
"BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"SampleAfterValue": "200003",
"BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "LD_BLOCKS.4K_ALIAS",
"SampleAfterValue": "200003",
"BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "LD_BLOCKS.UTLB_MISS",
"SampleAfterValue": "200003",
"BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "LD_BLOCKS.ALL_BLOCK",
"SampleAfterValue": "200003",
"BriefDescription": "Loads blocked (Precise event capable)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_ISSUED.ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Uops issued to the back end per cycle"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when core is not halted"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "CPU_CLK_UNHALTED.REF",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when core is not halted"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_NOT_DELIVERED.ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
},
{
"PEBS": "2",
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
"EventCode": "0xC0",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "INST_RETIRED.ANY_P",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts INST_RETIRED.ANY using the Reduced Skid PEBS feature that reduces the shadow in which events aren't counted allowing for a more unbiased distribution of samples across instructions retired.",
"EventCode": "0xC0",
"Counter": "0,1,2,3",
"UMask": "0x0",
"EventName": "INST_RETIRED.PREC_DIST",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired - using Reduced Skid PEBS feature"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts uops which retired.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_RETIRED.ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts uops retired that are from the complex flows issued by the micro-sequencer (MS). Counts both the uops from a micro-coded instruction, and the uops that might be generated from a micro-coded assist.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_RETIRED.MS",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "MS uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of floating point divide uops retired.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_RETIRED.FPDIV",
"SampleAfterValue": "2000003",
"BriefDescription": "Floating point divide uops retired (Precise Event Capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of integer divide uops retired.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_RETIRED.IDIV",
"SampleAfterValue": "2000003",
"BriefDescription": "Integer divide uops retired (Precise Event Capable)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts machine clears for any reason.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.ALL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "All machine clears"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.SMC",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "Self-Modifying Code detected"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "Machine clears due to FP assists"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "Machine clears due to memory disambiguation"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times that the machines clears due to a page fault. Covers both I-side and D-side(Loads/Stores) page faults. A page fault occurs when either page is not present, or an access violation",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x20",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "Machines clear due to a page fault"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "200003",
"BriefDescription": "Retired branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x7e",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.JCC",
"SampleAfterValue": "200003",
"BriefDescription": "Retired conditional branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts the number of taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x80",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
"SampleAfterValue": "200003",
"BriefDescription": "Retired taken branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xbf",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "200003",
"BriefDescription": "Retired far branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xeb",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
"SampleAfterValue": "200003",
"BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts near return branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xf7",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.RETURN",
"SampleAfterValue": "200003",
"BriefDescription": "Retired near return instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts near CALL branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xf9",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CALL",
"SampleAfterValue": "200003",
"BriefDescription": "Retired near call instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts near indirect CALL branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xfb",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.IND_CALL",
"SampleAfterValue": "200003",
"BriefDescription": "Retired near indirect call instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts near relative CALL branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xfd",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.REL_CALL",
"SampleAfterValue": "200003",
"BriefDescription": "Retired near relative call instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xfe",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
"SampleAfterValue": "200003",
"BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x7e",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.JCC",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0xeb",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0xf7",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.RETURN",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0xfb",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.IND_CALL",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0xfe",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts core cycles if either divide unit is busy.",
"EventCode": "0xCD",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "CYCLES_DIV_BUSY.ALL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles a divider is busy"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts core cycles the integer divide unit is busy.",
"EventCode": "0xCD",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "CYCLES_DIV_BUSY.IDIV",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles the integer divide unit is busy"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts core cycles the floating point divide unit is busy.",
"EventCode": "0xCD",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "CYCLES_DIV_BUSY.FPDIV",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles the FP divide unit is busy"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
"EventCode": "0xE6",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "BACLEARS.ALL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "BACLEARs asserted for any branch type"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts BACLEARS on return instructions.",
"EventCode": "0xE6",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "BACLEARS.RETURN",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "BACLEARs asserted for return branch"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
"EventCode": "0xE6",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "BACLEARS.COND",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "BACLEARs asserted for conditional branch"
}
]


@@ -0,0 +1,218 @@
[
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walk completed due to a demand load to a 4K page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walk completed due to a demand load to a 2M or 4M page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 1GB pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1GB",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walk completed due to a demand load to a 1GB page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts once per cycle for each page walk occurring due to a load (demand data loads or SW prefetches). Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walks outstanding due to a demand load every cycle."
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to a demand data store to a 4K page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to a demand data store to a 2M or 4M page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1GB",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to a demand data store to a 1GB page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts once per cycle for each page walk occurring due to a demand data store. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walks outstanding due to a demand data store every cycle."
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts once per cycle for each page walk only while traversing the Extended Page Table (EPT), and does not count during the rest of the translation. The EPT is used for translating Guest-Physical Addresses to Physical Addresses for Virtual Machine Monitors (VMMs). Average cycles per walk can be calculated by dividing the count by number of walks.",
"EventCode": "0x4F",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "EPT.WALK_PENDING",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walks outstanding due to walking the EPT every cycle"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
"EventCode": "0x81",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "ITLB.MISS",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "ITLB misses"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to an instruction fetch in a 4K page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to an instruction fetch in a 2M or 4M page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1GB",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to an instruction fetch in a 1GB page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts once per cycle for each page walk occurring due to an instruction fetch. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walks outstanding due to an instruction fetch every cycle."
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts STLB flushes. The TLBs are flushed on instructions like INVLPG and MOV to CR3.",
"EventCode": "0xBD",
"Counter": "0,1,2,3",
"UMask": "0x20",
"PEBScounters": "0,1,2,3",
"EventName": "TLB_FLUSHES.STLB_ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "STLB flushes"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts load uops retired that caused a DTLB miss.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x11",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
"SampleAfterValue": "200003",
"BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts store uops retired that caused a DTLB miss.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x12",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
"SampleAfterValue": "200003",
"BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts uops retired that had a DTLB miss on load, store or either. Note that when two distinct memory operations to the same page miss the DTLB, only one of them will be recorded as a DTLB miss.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x13",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
"SampleAfterValue": "200003",
"BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
}
]
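Taken together, the WALK_PENDING and WALK_COMPLETED_* events above allow deriving an average walk latency: WALK_PENDING accrues one count per outstanding walk per cycle, and the completed-walk events count walks per page size. A hypothetical metric in the style of the files below (the event names come from this file; the metric itself is an illustration, not part of this patch):

{
"BriefDescription": "Average cycles per completed demand-load page walk (illustrative)",
"MetricExpr": "DTLB_LOAD_MISSES.WALK_PENDING / ( DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1GB )",
"MetricGroup": "TLB",
"MetricName": "DTLB_Load_Walk_Latency"
}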

View File

@ -13,7 +13,7 @@
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization"
},
@ -25,7 +25,7 @@
},
{
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
@ -37,7 +37,7 @@
},
{
"BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
@ -49,19 +49,19 @@
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / ( cpu@uops_executed.core\\,cmask\\=1@ / 2)) if #SMT_on else (UOPS_EXECUTED.CORE / cpu@uops_executed.core\\,cmask\\=1@)",
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - ( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION ) ) / RS_EVENTS.EMPTY_END",
"MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost"
},
@ -79,13 +79,13 @@
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES",
"MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
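These hunks, and the identical ones in the files that follow, all fix the same precedence bug: in perf's metric expression grammar the trailing 'if ... else' binds loosest and '/' is left-associative. The old SLOTS expression therefore parsed as (4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 )) if #SMT_on else cycles, silently dropping the 4x factor whenever SMT is off, and the old CPI parsed as ( 1 / INST_RETIRED.ANY ) / cycles. As a worked check of the CPI fix: with 1,000 retired instructions in 2,000 cycles, the old form yields 1/1000/2000 = 5e-7, while the corrected 1 / (INST_RETIRED.ANY / cycles) yields 1/(1000/2000) = 2 cycles per instruction, as intended.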

View File

@ -13,7 +13,7 @@
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization"
},
@ -25,7 +25,7 @@
},
{
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
@ -37,7 +37,7 @@
},
{
"BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
@ -49,19 +49,19 @@
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / ( cpu@uops_executed.core\\,cmask\\=1@ / 2)) if #SMT_on else UOPS_EXECUTED.CORE / cpu@uops_executed.core\\,cmask\\=1@",
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - ( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION ) ) / RS_EVENTS.EMPTY_END",
"MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost"
},
@ -79,13 +79,13 @@
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES",
"MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},

View File

@ -13,7 +13,7 @@
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization"
},
@ -25,7 +25,7 @@
},
{
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
@ -37,7 +37,7 @@
},
{
"BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
@ -49,19 +49,19 @@
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( cpu@uops_executed.core\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END",
"MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost"
},
@ -79,13 +79,13 @@
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES",
"MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
@ -97,7 +97,7 @@
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE ) / 1000000000 / duration_time",
"MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
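The GFLOPs weights encode FLOPs per retired instruction: 1 for scalar SSE single/double, 2 for 128-bit packed double, 4 for 128-bit packed single and 256-bit packed double, and 8 for 256-bit packed single. The sum is scaled to billions and divided by the elapsed time. As a worked check (assuming duration_time is reported in seconds): retiring 3e9 256-bit packed-single operations over 2 seconds gives 8 * 3e9 / 1e9 / 2 = 12 GFLOPs.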

View File

@ -13,7 +13,7 @@
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization"
},
@ -25,7 +25,7 @@
},
{
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
@ -37,7 +37,7 @@
},
{
"BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
@ -49,19 +49,19 @@
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( cpu@uops_executed.core\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END",
"MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost"
},
@ -79,13 +79,13 @@
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES",
"MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
@ -97,7 +97,7 @@
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE ) / 1000000000 / duration_time",
"MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},

View File

@ -13,7 +13,7 @@
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization"
},
@ -25,7 +25,7 @@
},
{
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
@ -37,7 +37,7 @@
},
{
"BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
@ -49,13 +49,13 @@
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_DISPATCHED.THREAD / ( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@",
"MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
@ -73,7 +73,7 @@
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE ) / 1000000000 / duration_time",
"MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},

View File

@ -9,6 +9,7 @@ GenuineIntel-6-27,v4,bonnell,core
GenuineIntel-6-36,v4,bonnell,core
GenuineIntel-6-35,v4,bonnell,core
GenuineIntel-6-5C,v8,goldmont,core
GenuineIntel-6-7A,v1,goldmontplus,core
GenuineIntel-6-3C,v24,haswell,core
GenuineIntel-6-45,v24,haswell,core
GenuineIntel-6-46,v24,haswell,core

(mapfile.csv columns: Family-model, Version, Filename, EventType)
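Each row of mapfile.csv keys a CPUID string to an event file set: in the new GenuineIntel-6-7A entry, 6 is the CPU family and 0x7A (decimal 122) is the model number of Goldmont Plus, so perf running on such a CPU loads the v1 goldmontplus core events added earlier in this series.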

View File

@ -13,7 +13,7 @@
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization"
},
@ -25,7 +25,7 @@
},
{
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
@ -37,7 +37,7 @@
},
{
"BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
@ -49,13 +49,13 @@
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_DISPATCHED.THREAD / ( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@",
"MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
@ -73,7 +73,7 @@
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE ) / 1000000000 / duration_time",
"MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},

View File

@ -13,7 +13,7 @@
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
"MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization"
},
@ -25,7 +25,7 @@
},
{
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
@ -37,7 +37,7 @@
},
{
"BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
@ -49,19 +49,19 @@
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
"MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END",
"MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost"
},
@ -73,19 +73,19 @@
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES",
"MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles )",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
@ -97,7 +97,7 @@
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 / duration_time",
"MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},

View File

@ -13,19 +13,19 @@
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
"MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization"
},
{
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
"MetricGroup": "DSB; Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
@ -36,8 +36,8 @@
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-core)",
"MetricExpr": "4*cycles if not #SMT_on else (( CPU_CLK_UNHALTED.THREAD / 2) * (CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK )) if #EBS_Mode else ( CPU_CLK_UNHALTED.THREAD_ANY / 2 )",
"BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
@ -49,25 +49,25 @@
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles if not #SMT_on else (( CPU_CLK_UNHALTED.THREAD / 2) * (CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK )) if #EBS_Mode else ( CPU_CLK_UNHALTED.THREAD_ANY / 2 )",
"MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "( RS_EVENTS.EMPTY_CYCLES - (ICACHE_16B.IFDATA_STALL +2* ICACHE_16B.IFDATA_STALL:c1:e1) - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END",
"MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost"
},
{
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD if not #SMT_on else (( CPU_CLK_UNHALTED.THREAD / 2) * (CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK )) if 1 else ( CPU_CLK_UNHALTED.THREAD_ANY / 2 )",
"MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
@ -79,34 +79,16 @@
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES",
"MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles if not #SMT_on else (( CPU_CLK_UNHALTED.THREAD / 2) * (CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK )) if #EBS_Mode else ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) )",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
"BriefDescription": "L1 cache miss per kilo instruction for demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS_PS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses;",
"MetricName": "L1MPKI"
},
{
"BriefDescription": "L2 cache miss per kilo instruction for demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS_PS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses;",
"MetricName": "L2MPKI"
},
{
"BriefDescription": "L3 cache miss per kilo instruction for demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS_PS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses;",
"MetricName": "L3MPKI"
},
{
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
@ -115,7 +97,7 @@
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16* FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1000000000 / duration_time",
"MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},

View File

@ -7,3 +7,4 @@ ret = 1
# events are disabled by default when attached to cpu
disabled=1
enable_on_exec=0
optional=1
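The optional=1 flag added here, and throughout the base-stat test files below, tells the attr test harness to tolerate that event being absent from the result, which is what happens when the kernel cannot open the counter, e.g. on virtual machines without PMU support for it. A minimal event section in this format, for illustration (the fd/type/config values are made up; only the 'optional' flag comes from this patch):

[event:base-stat]
fd=1
type=0
config=0
# do not fail the test if this counter could not be opened (e.g. in a VM)
optional=1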

View File

@ -4,3 +4,4 @@ args = -e cycles kill >/dev/null 2>&1
ret = 1
[event:base-stat]
optional=1

View File

@ -32,6 +32,7 @@ config=2
fd=5
type=0
config=0
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat]
@ -52,15 +53,18 @@ optional=1
fd=8
type=0
config=1
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat]
fd=9
type=0
config=4
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat]
fd=10
type=0
config=5
optional=1

View File

@ -33,6 +33,7 @@ config=2
fd=5
type=0
config=0
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat]
@ -53,18 +54,21 @@ optional=1
fd=8
type=0
config=1
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat]
fd=9
type=0
config=4
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat]
fd=10
type=0
config=5
optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@ -74,6 +78,7 @@ config=5
fd=11
type=3
config=0
optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@ -83,6 +88,7 @@ config=0
fd=12
type=3
config=65536
optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_LL << 0 |
@ -92,6 +98,7 @@ config=65536
fd=13
type=3
config=2
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_LL << 0 |
@ -101,3 +108,4 @@ config=2
fd=14
type=3
config=65538
optional=1

View File

@ -33,6 +33,7 @@ config=2
fd=5
type=0
config=0
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat]
@ -53,18 +54,21 @@ optional=1
fd=8
type=0
config=1
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat]
fd=9
type=0
config=4
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat]
fd=10
type=0
config=5
optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@ -74,6 +78,7 @@ config=5
fd=11
type=3
config=0
optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@ -83,6 +88,7 @@ config=0
fd=12
type=3
config=65536
optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_LL << 0 |
@ -92,6 +98,7 @@ config=65536
fd=13
type=3
config=2
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_LL << 0 |
@ -101,6 +108,7 @@ config=2
fd=14
type=3
config=65538
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_L1I << 0 |
@ -120,6 +128,7 @@ optional=1
fd=16
type=3
config=65537
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 |
@ -129,6 +138,7 @@ config=65537
fd=17
type=3
config=3
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 |
@ -138,6 +148,7 @@ config=3
fd=18
type=3
config=65539
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 |
@ -147,6 +158,7 @@ config=65539
fd=19
type=3
config=4
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 |
@ -156,3 +168,4 @@ config=4
fd=20
type=3
config=65540
optional=1

View File

@ -33,6 +33,7 @@ config=2
fd=5
type=0
config=0
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat]
@ -53,18 +54,21 @@ optional=1
fd=8
type=0
config=1
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat]
fd=9
type=0
config=4
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat]
fd=10
type=0
config=5
optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@ -74,6 +78,7 @@ config=5
fd=11
type=3
config=0
optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@ -83,6 +88,7 @@ config=0
fd=12
type=3
config=65536
optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_LL << 0 |
@ -92,6 +98,7 @@ config=65536
fd=13
type=3
config=2
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_LL << 0 |
@ -101,6 +108,7 @@ config=2
fd=14
type=3
config=65538
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_L1I << 0 |
@ -120,6 +128,7 @@ optional=1
fd=16
type=3
config=65537
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 |
@ -129,6 +138,7 @@ config=65537
fd=17
type=3
config=3
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 |
@ -138,6 +148,7 @@ config=3
fd=18
type=3
config=65539
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 |
@ -147,6 +158,7 @@ config=65539
fd=19
type=3
config=4
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 |
@ -156,6 +168,7 @@ config=4
fd=20
type=3
config=65540
optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_L1D << 0 |

View File

@ -5,3 +5,4 @@ ret = 1
[event:base-stat]
inherit=0
optional=1

View File

@ -13,6 +13,7 @@ libperf-y += find_bit.o
libperf-y += kallsyms.o
libperf-y += levenshtein.o
libperf-y += llvm-utils.o
libperf-y += mmap.o
libperf-y += memswap.o
libperf-y += parse-events.o
libperf-y += perf_regs.o

View File

@ -49,10 +49,9 @@ struct arch {
void *priv;
unsigned int model;
unsigned int family;
int (*init)(struct arch *arch);
int (*init)(struct arch *arch, char *cpuid);
bool (*ins_is_fused)(struct arch *arch, const char *ins1,
const char *ins2);
int (*cpuid_parse)(struct arch *arch, char *cpuid);
struct {
char comment_char;
char skip_functions_char;
@ -132,10 +131,10 @@ static struct arch architectures[] = {
},
{
.name = "x86",
.init = x86__annotate_init,
.instructions = x86__instructions,
.nr_instructions = ARRAY_SIZE(x86__instructions),
.ins_is_fused = x86__ins_is_fused,
.cpuid_parse = x86__cpuid_parse,
.objdump = {
.comment_char = '#',
},
@ -1447,16 +1446,13 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
*parch = arch;
if (arch->init) {
err = arch->init(arch);
err = arch->init(arch, cpuid);
if (err) {
pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
return err;
}
}
if (arch->cpuid_parse && cpuid)
arch->cpuid_parse(arch, cpuid);
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
symfs_filename, sym->name, map->unmap_ip(map, sym->start),
map->unmap_ip(map, sym->end));
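The net effect of this hunk is that cpuid handling folds into the per-arch ->init() hook instead of a separate ->cpuid_parse() callback invoked afterwards. A sketch of the resulting x86 callback shape (the body is an assumption for illustration; only the init(arch, cpuid) signature and the x86__cpuid_parse() helper name appear in this diff):

static int x86__annotate_init(struct arch *arch, char *cpuid)
{
	int err = 0;

	/* formerly done by symbol__disassemble() via ->cpuid_parse() */
	if (cpuid)
		err = x86__cpuid_parse(arch, cpuid);

	return err;
}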

View File

@ -111,50 +111,53 @@ int dump_printf(const char *fmt, ...)
return ret;
}
static void trace_event_printer(enum binary_printer_ops op,
unsigned int val, void *extra)
static int trace_event_printer(enum binary_printer_ops op,
unsigned int val, void *extra, FILE *fp)
{
const char *color = PERF_COLOR_BLUE;
union perf_event *event = (union perf_event *)extra;
unsigned char ch = (unsigned char)val;
int printed = 0;
switch (op) {
case BINARY_PRINT_DATA_BEGIN:
printf(".");
color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n",
event->header.size);
printed += fprintf(fp, ".");
printed += color_fprintf(fp, color, "\n. ... raw event: size %d bytes\n",
event->header.size);
break;
case BINARY_PRINT_LINE_BEGIN:
printf(".");
printed += fprintf(fp, ".");
break;
case BINARY_PRINT_ADDR:
color_fprintf(stdout, color, " %04x: ", val);
printed += color_fprintf(fp, color, " %04x: ", val);
break;
case BINARY_PRINT_NUM_DATA:
color_fprintf(stdout, color, " %02x", val);
printed += color_fprintf(fp, color, " %02x", val);
break;
case BINARY_PRINT_NUM_PAD:
color_fprintf(stdout, color, " ");
printed += color_fprintf(fp, color, " ");
break;
case BINARY_PRINT_SEP:
color_fprintf(stdout, color, " ");
printed += color_fprintf(fp, color, " ");
break;
case BINARY_PRINT_CHAR_DATA:
color_fprintf(stdout, color, "%c",
printed += color_fprintf(fp, color, "%c",
isprint(ch) ? ch : '.');
break;
case BINARY_PRINT_CHAR_PAD:
color_fprintf(stdout, color, " ");
printed += color_fprintf(fp, color, " ");
break;
case BINARY_PRINT_LINE_END:
color_fprintf(stdout, color, "\n");
printed += color_fprintf(fp, color, "\n");
break;
case BINARY_PRINT_DATA_END:
printf("\n");
printed += fprintf(fp, "\n");
break;
default:
break;
}
return printed;
}
void trace_event(union perf_event *event)
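Converting the printer from void to int and threading a FILE * through makes it a drop-in fprintf-style callback: output can go to any stream and the caller can accumulate the number of characters written. A hypothetical driver showing how the return values compose (this function is illustrative, not from this diff; the enum values and trace_event_printer() itself are):

static int trace_event__fprintf(union perf_event *event, FILE *fp)
{
	unsigned char *data = (unsigned char *)event;
	int printed = 0;
	unsigned int i;

	printed += trace_event_printer(BINARY_PRINT_DATA_BEGIN, 0, event, fp);
	for (i = 0; i < event->header.size; i++)
		printed += trace_event_printer(BINARY_PRINT_NUM_DATA, data[i], event, fp);
	printed += trace_event_printer(BINARY_PRINT_DATA_END, 0, event, fp);

	return printed;
}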

View File

@ -33,9 +33,6 @@
#include <linux/log2.h>
#include <linux/err.h>
static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
@ -704,129 +701,6 @@ static int perf_evlist__resume(struct perf_evlist *evlist)
return perf_evlist__set_paused(evlist, false);
}
/* When check_messup is true, 'end' must points to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
u64 end, u64 *prev)
{
unsigned char *data = md->base + page_size;
union perf_event *event = NULL;
int diff = end - start;
if (check_messup) {
/*
* If we're further behind than half the buffer, there's a chance
* the writer will bite our tail and mess up the samples under us.
*
* If we somehow ended up ahead of the 'end', we got messed up.
*
* In either case, truncate and restart at 'end'.
*/
if (diff > md->mask / 2 || diff < 0) {
fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
/*
* 'end' points to a known good entry, start there.
*/
start = end;
diff = 0;
}
}
if (diff >= (int)sizeof(event->header)) {
size_t size;
event = (union perf_event *)&data[start & md->mask];
size = event->header.size;
if (size < sizeof(event->header) || diff < (int)size) {
event = NULL;
goto broken_event;
}
/*
* Event straddles the mmap boundary -- header should always
* be inside due to u64 alignment of output.
*/
if ((start & md->mask) + size != ((start + size) & md->mask)) {
unsigned int offset = start;
unsigned int len = min(sizeof(*event), size), cpy;
void *dst = md->event_copy;
do {
cpy = min(md->mask + 1 - (offset & md->mask), len);
memcpy(dst, &data[offset & md->mask], cpy);
offset += cpy;
dst += cpy;
len -= cpy;
} while (len);
event = (union perf_event *) md->event_copy;
}
start += size;
}
broken_event:
if (prev)
*prev = start;
return event;
}
union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
u64 head;
u64 old = md->prev;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
if (!refcount_read(&md->refcnt))
return NULL;
head = perf_mmap__read_head(md);
return perf_mmap__read(md, check_messup, old, head, &md->prev);
}
union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
u64 head, end;
u64 start = md->prev;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
if (!refcount_read(&md->refcnt))
return NULL;
head = perf_mmap__read_head(md);
if (!head)
return NULL;
/*
* 'head' pointer starts from 0. Kernel minus sizeof(record) form
* it each time when kernel writes to it, so in fact 'head' is
* negative. 'end' pointer is made manually by adding the size of
* the ring buffer to 'head' pointer, means the validate data can
* read is the whole ring buffer. If 'end' is positive, the ring
* buffer has not fully filled, so we must adjust 'end' to 0.
*
* However, since both 'head' and 'end' is unsigned, we can't
* simply compare 'end' against 0. Here we compare '-head' and
* the size of the ring buffer, where -head is the number of bytes
* kernel write to the ring buffer.
*/
if (-head < (u64)(md->mask + 1))
end = 0;
else
end = head + md->mask + 1;
return perf_mmap__read(md, false, start, end, &md->prev);
}
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
struct perf_mmap *md = &evlist->mmap[idx];
@ -857,96 +731,16 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
return perf_evlist__mmap_read_forward(evlist, idx);
}
void perf_mmap__read_catchup(struct perf_mmap *md)
{
u64 head;
if (!refcount_read(&md->refcnt))
return;
head = perf_mmap__read_head(md);
md->prev = head;
}
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
perf_mmap__read_catchup(&evlist->mmap[idx]);
}
static bool perf_mmap__empty(struct perf_mmap *md)
{
return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}
static void perf_mmap__get(struct perf_mmap *map)
{
refcount_inc(&map->refcnt);
}
static void perf_mmap__put(struct perf_mmap *md)
{
BUG_ON(md->base && refcount_read(&md->refcnt) == 0);
if (refcount_dec_and_test(&md->refcnt))
perf_mmap__munmap(md);
}
void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
if (!overwrite) {
u64 old = md->prev;
perf_mmap__write_tail(md, old);
}
if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
perf_mmap__put(md);
}
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
struct auxtrace_mmap_params *mp __maybe_unused,
void *userpg __maybe_unused,
int fd __maybe_unused)
{
return 0;
}
void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}
void __weak auxtrace_mmap_params__init(
struct auxtrace_mmap_params *mp __maybe_unused,
off_t auxtrace_offset __maybe_unused,
unsigned int auxtrace_pages __maybe_unused,
bool auxtrace_overwrite __maybe_unused)
{
}
void __weak auxtrace_mmap_params__set_idx(
struct auxtrace_mmap_params *mp __maybe_unused,
struct perf_evlist *evlist __maybe_unused,
int idx __maybe_unused,
bool per_cpu __maybe_unused)
{
}
static void perf_mmap__munmap(struct perf_mmap *map)
{
if (map->base != NULL) {
munmap(map->base, perf_mmap__mmap_len(map));
map->base = NULL;
map->fd = -1;
refcount_set(&map->refcnt, 0);
}
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
int i;
@ -995,48 +789,6 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
return map;
}
struct mmap_params {
int prot;
int mask;
struct auxtrace_mmap_params auxtrace_mp;
};
static int perf_mmap__mmap(struct perf_mmap *map,
struct mmap_params *mp, int fd)
{
/*
* The last one will be done at perf_evlist__mmap_consume(), so that we
* make sure we don't prevent tools from consuming every last event in
* the ring buffer.
*
* I.e. we can get the POLLHUP meaning that the fd doesn't exist
* anymore, but the last events for it are still in the ring buffer,
* waiting to be consumed.
*
* Tools can chose to ignore this at their own discretion, but the
* evlist layer can't just drop it when filtering events in
* perf_evlist__filter_pollfd().
*/
refcount_set(&map->refcnt, 2);
map->prev = 0;
map->mask = mp->mask;
map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
MAP_SHARED, fd, 0);
if (map->base == MAP_FAILED) {
pr_debug2("failed to mmap perf event ring buffer, error %d\n",
errno);
map->base = NULL;
return -1;
}
map->fd = fd;
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
return -1;
return 0;
}
static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
struct perf_evsel *evsel)

View File

@ -11,8 +11,8 @@
#include "../perf.h"
#include "event.h"
#include "evsel.h"
#include "mmap.h"
#include "util.h"
#include "auxtrace.h"
#include <signal.h>
#include <unistd.h>
@ -24,55 +24,6 @@ struct record_opts;
#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
/**
* struct perf_mmap - perf's ring buffer mmap details
*
* @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
*/
struct perf_mmap {
void *base;
int mask;
int fd;
refcount_t refcnt;
u64 prev;
struct auxtrace_mmap auxtrace_mmap;
char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};
static inline size_t
perf_mmap__mmap_len(struct perf_mmap *map)
{
return map->mask + 1 + page_size;
}
/*
* State machine of bkw_mmap_state:
*
* .________________(forbid)_____________.
* | V
* NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
* ^ ^ | ^ |
* | |__(forbid)____/ |___(forbid)___/|
* | |
* \_________________(3)_______________/
*
* NOTREADY : Backward ring buffers are not ready
* RUNNING : Backward ring buffers are recording
* DATA_PENDING : We are required to collect data from backward ring buffers
* EMPTY : We have collected data from backward ring buffers.
*
* (0): Setup backward ring buffer
* (1): Pause ring buffers for reading
* (2): Read from ring buffers
* (3): Resume ring buffers for recording
*/
enum bkw_mmap_state {
BKW_MMAP_NOTREADY,
BKW_MMAP_RUNNING,
BKW_MMAP_DATA_PENDING,
BKW_MMAP_EMPTY,
};
struct perf_evlist {
struct list_head entries;
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
@ -177,12 +128,6 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);
union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
void perf_mmap__read_catchup(struct perf_mmap *md);
void perf_mmap__consume(struct perf_mmap *md, bool overwrite);
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
@ -286,25 +231,6 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->base;
u64 head = ACCESS_ONCE(pc->data_head);
rmb();
return head;
}
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
struct perf_event_mmap_page *pc = md->base;
/*
* ensure all reads are done before we write the tail out.
*/
mb();
pc->data_tail = tail;
}
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
void perf_evlist__to_front(struct perf_evlist *evlist,
struct perf_evsel *move_evsel);

tools/perf/util/mmap.c (new file, 352 lines)
View File

@ -0,0 +1,352 @@
/*
* Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
* copyright notes.
*
* Released under the GPL v2. (and only v2, not any later version)
*/
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
return map->mask + 1 + page_size;
}
/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup,
u64 start, u64 end, u64 *prev)
{
unsigned char *data = map->base + page_size;
union perf_event *event = NULL;
int diff = end - start;
if (check_messup) {
/*
* If we're further behind than half the buffer, there's a chance
* the writer will bite our tail and mess up the samples under us.
*
* If we somehow ended up ahead of the 'end', we got messed up.
*
* In either case, truncate and restart at 'end'.
*/
if (diff > map->mask / 2 || diff < 0) {
fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
/*
* 'end' points to a known good entry, start there.
*/
start = end;
diff = 0;
}
}
if (diff >= (int)sizeof(event->header)) {
size_t size;
event = (union perf_event *)&data[start & map->mask];
size = event->header.size;
if (size < sizeof(event->header) || diff < (int)size) {
event = NULL;
goto broken_event;
}
/*
* Event straddles the mmap boundary -- header should always
* be inside due to u64 alignment of output.
*/
if ((start & map->mask) + size != ((start + size) & map->mask)) {
unsigned int offset = start;
unsigned int len = min(sizeof(*event), size), cpy;
void *dst = map->event_copy;
do {
cpy = min(map->mask + 1 - (offset & map->mask), len);
memcpy(dst, &data[offset & map->mask], cpy);
offset += cpy;
dst += cpy;
len -= cpy;
} while (len);
event = (union perf_event *)map->event_copy;
}
start += size;
}
broken_event:
if (prev)
*prev = start;
return event;
}
union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup)
{
u64 head;
u64 old = map->prev;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
if (!refcount_read(&map->refcnt))
return NULL;
head = perf_mmap__read_head(map);
return perf_mmap__read(map, check_messup, old, head, &map->prev);
}
union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
{
u64 head, end;
u64 start = map->prev;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
if (!refcount_read(&map->refcnt))
return NULL;
head = perf_mmap__read_head(map);
if (!head)
return NULL;
/*
* 'head' starts at 0 and the kernel subtracts sizeof(record) from it
* on every write, so 'head' is effectively negative. 'end' is derived
* by adding the ring buffer size to 'head', which means the whole
* ring buffer is valid to read. If 'end' is positive, the ring
* buffer has not been fully filled, so 'end' must be adjusted to 0.
*
* However, since both 'head' and 'end' are unsigned, 'end' cannot
* simply be compared against 0. Instead compare '-head', the number
* of bytes the kernel has written to the ring buffer, with the size
* of the ring buffer.
*/
if (-head < (u64)(map->mask + 1))
end = 0;
else
end = head + map->mask + 1;
return perf_mmap__read(map, false, start, end, &map->prev);
}
void perf_mmap__read_catchup(struct perf_mmap *map)
{
u64 head;
if (!refcount_read(&map->refcnt))
return;
head = perf_mmap__read_head(map);
map->prev = head;
}
static bool perf_mmap__empty(struct perf_mmap *map)
{
return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}
void perf_mmap__get(struct perf_mmap *map)
{
refcount_inc(&map->refcnt);
}
void perf_mmap__put(struct perf_mmap *map)
{
BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
if (refcount_dec_and_test(&map->refcnt))
perf_mmap__munmap(map);
}
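/*
* Advance the tail so the kernel can reuse the consumed space; in
* overwrite mode the kernel doesn't read data_tail, so there is
* nothing to update.
*/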
void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
if (!overwrite) {
u64 old = map->prev;
perf_mmap__write_tail(map, old);
}
if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
perf_mmap__put(map);
}
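/*
* Default no-op stubs for builds that don't link the full auxtrace
* support (e.g. the python extension); auxtrace.c overrides these
* __weak symbols.
*/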
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
struct auxtrace_mmap_params *mp __maybe_unused,
void *userpg __maybe_unused,
int fd __maybe_unused)
{
return 0;
}
void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}
void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
off_t auxtrace_offset __maybe_unused,
unsigned int auxtrace_pages __maybe_unused,
bool auxtrace_overwrite __maybe_unused)
{
}
void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
struct perf_evlist *evlist __maybe_unused,
int idx __maybe_unused,
bool per_cpu __maybe_unused)
{
}
void perf_mmap__munmap(struct perf_mmap *map)
{
if (map->base != NULL) {
munmap(map->base, perf_mmap__mmap_len(map));
map->base = NULL;
map->fd = -1;
refcount_set(&map->refcnt, 0);
}
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
/*
* The last one will be done at perf_evlist__mmap_consume(), so that we
* make sure we don't prevent tools from consuming every last event in
* the ring buffer.
*
* I.e. we can get the POLLHUP meaning that the fd doesn't exist
* anymore, but the last events for it are still in the ring buffer,
* waiting to be consumed.
*
* Tools can choose to ignore this at their own discretion, but the
* evlist layer can't just drop it when filtering events in
* perf_evlist__filter_pollfd().
*/
refcount_set(&map->refcnt, 2);
map->prev = 0;
map->mask = mp->mask;
map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
MAP_SHARED, fd, 0);
if (map->base == MAP_FAILED) {
pr_debug2("failed to mmap perf event ring buffer, error %d\n",
errno);
map->base = NULL;
return -1;
}
map->fd = fd;
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
return -1;
return 0;
}
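/*
* Find the valid [start, end) range in a backward ring buffer by
* walking record headers forward from 'head' until either the whole
* buffer size is covered (it wrapped) or a zero-sized header marks
* the end of the written data.
*/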
static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
struct perf_event_header *pheader;
u64 evt_head = head;
int size = mask + 1;
pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
pheader = (struct perf_event_header *)(buf + (head & mask));
*start = head;
while (true) {
if (evt_head - head >= (unsigned int)size) {
pr_debug("Finished reading backward ring buffer: rewind\n");
if (evt_head - head > (unsigned int)size)
evt_head -= pheader->size;
*end = evt_head;
return 0;
}
pheader = (struct perf_event_header *)(buf + (evt_head & mask));
if (pheader->size == 0) {
pr_debug("Finished reading backward ring buffer: get start\n");
*end = evt_head;
return 0;
}
evt_head += pheader->size;
pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
}
WARN_ONCE(1, "Shouldn't get here\n");
return -1;
}
static int rb_find_range(void *data, int mask, u64 head, u64 old,
u64 *start, u64 *end, bool backward)
{
if (!backward) {
*start = old;
*end = head;
return 0;
}
return backward_rb_find_range(data, mask, head, start, end);
}
int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
void *to, int push(void *to, void *buf, size_t size))
{
u64 head = perf_mmap__read_head(md);
u64 old = md->prev;
u64 end = head, start = old;
unsigned char *data = md->base + page_size;
unsigned long size;
void *buf;
int rc = 0;
if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
return -1;
if (start == end)
return 0;
size = end - start;
if (size > (unsigned long)(md->mask) + 1) {
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
md->prev = head;
perf_mmap__consume(md, overwrite || backward);
return 0;
}
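/*
* If [start, end) wraps past the end of the ring buffer, push the
* chunk up to the buffer end first, then the rest from the start.
*/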
if ((start & md->mask) + size != (end & md->mask)) {
buf = &data[start & md->mask];
size = md->mask + 1 - (start & md->mask);
start += size;
if (push(to, buf, size) < 0) {
rc = -1;
goto out;
}
}
buf = &data[start & md->mask];
size = end - start;
start += size;
if (push(to, buf, size) < 0) {
rc = -1;
goto out;
}
md->prev = head;
perf_mmap__consume(md, overwrite || backward);
out:
return rc;
}

tools/perf/util/mmap.h (new file, 97 lines)

@@ -0,0 +1,97 @@
#ifndef __PERF_MMAP_H
#define __PERF_MMAP_H 1
#include <linux/compiler.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <stdbool.h>
#include "auxtrace.h"
#include "event.h"
/**
* struct perf_mmap - perf's ring buffer mmap details
*
* @refcnt - reference count; held e.g. by code using
* PERF_EVENT_IOC_SET_OUTPUT to share this mmap between events
*/
struct perf_mmap {
void *base;
int mask;
int fd;
refcount_t refcnt;
u64 prev;
struct auxtrace_mmap auxtrace_mmap;
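/* scratch buffer to linearize events that wrap the ring buffer */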
char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};
/*
* State machine of bkw_mmap_state:
*
*                     .________________(forbid)_____________.
*                     |                                     V
* NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
*                     ^  ^              |   ^               |
*                     |  |__(forbid)____/   |___(forbid)___/|
*                     |                                     |
*                      \_________________(3)_______________/
*
* NOTREADY : Backward ring buffers are not ready
* RUNNING : Backward ring buffers are recording
* DATA_PENDING : We are required to collect data from backward ring buffers
* EMPTY : We have collected data from backward ring buffers.
*
* (0): Setup backward ring buffer
* (1): Pause ring buffers for reading
* (2): Read from ring buffers
* (3): Resume ring buffers for recording
*/
enum bkw_mmap_state {
BKW_MMAP_NOTREADY,
BKW_MMAP_RUNNING,
BKW_MMAP_DATA_PENDING,
BKW_MMAP_EMPTY,
};
struct mmap_params {
int prot, mask;
struct auxtrace_mmap_params auxtrace_mp;
};
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd);
void perf_mmap__munmap(struct perf_mmap *map);
void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);
void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
void perf_mmap__read_catchup(struct perf_mmap *md);
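/*
* Read data_head and then issue a read barrier so that none of the
* ring-buffer contents is read before the head value we return,
* pairing with the kernel's store to data_head.
*/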
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->base;
u64 head = ACCESS_ONCE(pc->data_head);
rmb();
return head;
}
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
struct perf_event_mmap_page *pc = md->base;
/*
* ensure all reads are done before we write the tail out.
*/
mb();
pc->data_tail = tail;
}
union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
void *to, int push(void *to, void *buf, size_t size));
size_t perf_mmap__mmap_len(struct perf_mmap *map);
#endif /*__PERF_MMAP_H */
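A minimal sketch (not part of this commit) of how a tool might drain one
non-overwritable forward ring buffer with the API above; deliver_event()
is a hypothetical consumer:

static void drain_one_mmap(struct perf_mmap *map)
{
        union perf_event *event;

        while ((event = perf_mmap__read_forward(map, false)) != NULL) {
                deliver_event(event); /* hypothetical consumer */
                perf_mmap__consume(map, false); /* release the space */
        }
}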

tools/perf/util/namespaces.h

@@ -9,9 +9,10 @@
#ifndef __PERF_NAMESPACES_H
#define __PERF_NAMESPACES_H
#include "../perf.h"
#include <linux/list.h>
#include <sys/types.h>
#include <linux/perf_event.h>
#include <linux/refcount.h>
#include <linux/types.h>
struct namespaces_event;

View File

@@ -2,40 +2,42 @@
 #include <linux/log2.h>
 #include "sane_ctype.h"
 
-void print_binary(unsigned char *data, size_t len,
-                  size_t bytes_per_line, print_binary_t printer,
-                  void *extra)
+int binary__fprintf(unsigned char *data, size_t len,
+                    size_t bytes_per_line, binary__fprintf_t printer,
+                    void *extra, FILE *fp)
 {
         size_t i, j, mask;
+        int printed = 0;
 
         if (!printer)
-                return;
+                return 0;
 
         bytes_per_line = roundup_pow_of_two(bytes_per_line);
         mask = bytes_per_line - 1;
 
-        printer(BINARY_PRINT_DATA_BEGIN, 0, extra);
+        printed += printer(BINARY_PRINT_DATA_BEGIN, 0, extra, fp);
         for (i = 0; i < len; i++) {
                 if ((i & mask) == 0) {
-                        printer(BINARY_PRINT_LINE_BEGIN, -1, extra);
-                        printer(BINARY_PRINT_ADDR, i, extra);
+                        printed += printer(BINARY_PRINT_LINE_BEGIN, -1, extra, fp);
+                        printed += printer(BINARY_PRINT_ADDR, i, extra, fp);
                 }
 
-                printer(BINARY_PRINT_NUM_DATA, data[i], extra);
+                printed += printer(BINARY_PRINT_NUM_DATA, data[i], extra, fp);
 
                 if (((i & mask) == mask) || i == len - 1) {
                         for (j = 0; j < mask-(i & mask); j++)
-                                printer(BINARY_PRINT_NUM_PAD, -1, extra);
+                                printed += printer(BINARY_PRINT_NUM_PAD, -1, extra, fp);
 
-                        printer(BINARY_PRINT_SEP, i, extra);
+                        printer(BINARY_PRINT_SEP, i, extra, fp);
                         for (j = i & ~mask; j <= i; j++)
-                                printer(BINARY_PRINT_CHAR_DATA, data[j], extra);
+                                printed += printer(BINARY_PRINT_CHAR_DATA, data[j], extra, fp);
                         for (j = 0; j < mask-(i & mask); j++)
-                                printer(BINARY_PRINT_CHAR_PAD, i, extra);
-                        printer(BINARY_PRINT_LINE_END, -1, extra);
+                                printed += printer(BINARY_PRINT_CHAR_PAD, i, extra, fp);
+                        printed += printer(BINARY_PRINT_LINE_END, -1, extra, fp);
                 }
         }
-        printer(BINARY_PRINT_DATA_END, -1, extra);
+        printed += printer(BINARY_PRINT_DATA_END, -1, extra, fp);
+        return printed;
 }
 
 int is_printable_array(char *p, unsigned int len)

View File

@@ -2,6 +2,7 @@
 #define PERF_PRINT_BINARY_H
 
 #include <stddef.h>
+#include <stdio.h>
 
 enum binary_printer_ops {
@@ -16,12 +17,19 @@ enum binary_printer_ops {
         BINARY_PRINT_DATA_END,
 };
 
-typedef void (*print_binary_t)(enum binary_printer_ops op,
-                               unsigned int val, void *extra);
+typedef int (*binary__fprintf_t)(enum binary_printer_ops op,
+                                 unsigned int val, void *extra, FILE *fp);
 
-void print_binary(unsigned char *data, size_t len,
-                  size_t bytes_per_line, print_binary_t printer,
-                  void *extra);
+int binary__fprintf(unsigned char *data, size_t len,
+                    size_t bytes_per_line, binary__fprintf_t printer,
+                    void *extra, FILE *fp);
+
+static inline void print_binary(unsigned char *data, size_t len,
+                                size_t bytes_per_line, binary__fprintf_t printer,
+                                void *extra)
+{
+        binary__fprintf(data, len, bytes_per_line, printer, extra, stdout);
+}
 
 int is_printable_array(char *p, unsigned int len);
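A hedged sketch (not part of this commit) of a binary__fprintf_t
callback using only the API declared above; hex_printer is a made-up
name:

static int hex_printer(enum binary_printer_ops op, unsigned int val,
                       void *extra __maybe_unused, FILE *fp)
{
        switch (op) {
        case BINARY_PRINT_NUM_DATA: /* one byte of the dump */
                return fprintf(fp, " %02x", val);
        case BINARY_PRINT_LINE_END:
                return fprintf(fp, "\n");
        default: /* ignore addresses, padding and separators */
                return 0;
        }
}

Hooked up as: binary__fprintf(data, len, 16, hex_printer, NULL, stdout);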

tools/perf/util/python-ext-sources

@@ -10,6 +10,7 @@ util/ctype.c
 util/evlist.c
 util/evsel.c
 util/cpumap.c
+util/mmap.c
 util/namespaces.c
 ../lib/bitmap.c
 ../lib/find_bit.c