tools/perf/arch/x86/util/evsel.c:

evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
}
-void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr)
-{
- struct perf_env env = { .total_mem = 0, } ;
-
- if (!perf_env__cpuid(&env))
- return;
-
- /*
- * On AMD, precise cycles event sampling internally uses IBS pmu.
- * But IBS does not have filtering capabilities and perf by default
- * sets exclude_guest = 1. This makes IBS pmu event init fail and
- * thus perf ends up doing non-precise sampling. Avoid it by clearing
- * exclude_guest.
- */
- if (env.cpuid && strstarts(env.cpuid, "AuthenticAMD"))
- attr->exclude_guest = 0;
-
- free(env.cpuid);
-}
-
/* Check whether the evsel's PMU supports the perf metrics */
bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
{
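
The hook removed above was x86-only plumbing for a single quirk: on AMD, precise cycles sampling is implemented on top of the IBS PMU, which has no guest/host filtering, so the default exclude_guest = 1 made the event fail to open and perf silently fell back to non-precise sampling. A minimal sketch of the effect it had (perf_event_attr and its field are the real uapi; the cpuid test is reduced to a plain strncmp() so the sketch is self-contained):

	#include <string.h>
	#include <linux/perf_event.h>

	/* Sketch of the removed fixup: IBS cannot filter out guest state,
	 * so a precise cycles event on AMD must clear exclude_guest. */
	static void fixup_precise_cycles(struct perf_event_attr *attr,
					 const char *cpuid)
	{
		if (cpuid && !strncmp(cpuid, "AuthenticAMD", 12))
			attr->exclude_guest = 0;
	}

With evsel__new_cycles() gone (see the util/evsel.c hunk further down), nothing constructs a default cycles attribute by hand any more, so the hook and its weak default go too.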

tools/perf/builtin-record.c:

record.opts.tail_synthesize = true;
if (rec->evlist->core.nr_entries == 0) {
- if (perf_pmu__has_hybrid()) {
- err = evlist__add_default_hybrid(rec->evlist,
- !record.opts.no_samples);
- } else {
- err = __evlist__add_default(rec->evlist,
- !record.opts.no_samples);
- }
+ bool can_profile_kernel = perf_event_paranoid_check(1);

- if (err < 0) {
- pr_err("Not enough memory for event selector list\n");
+ err = parse_event(rec->evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
+ if (err)
goto out;
- }
}
if (rec->opts.target.tid && !rec->opts.no_inherit_set)
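
builtin-record and builtin-top (next hunk) get the same treatment: instead of hand-building a cycles evsel, the default event is a string handed to the event parser. perf_event_paranoid_check(1) reports whether /proc/sys/kernel/perf_event_paranoid permits kernel profiling (level 1 or lower); when it does not, the "u" modifier restricts the event to user space, and "P" asks for the highest sampling precision available. A sketch of the attribute those strings reduce to (uapi fields only; the real expansion happens in parse_events()):

	#include <stdbool.h>
	#include <linux/perf_event.h>

	/* Sketch: what "cycles:P" / "cycles:Pu" boil down to. */
	static void default_cycles_attr(struct perf_event_attr *attr,
					bool can_profile_kernel)
	{
		attr->type   = PERF_TYPE_HARDWARE;
		attr->config = PERF_COUNT_HW_CPU_CYCLES;
		if (!can_profile_kernel) {
			attr->exclude_kernel = 1;	/* the "u" modifier */
			attr->exclude_hv = 1;
		}
		/* "P" sets evsel->precise_max; see the precision-probing
		 * sketch after the util/evsel.c hunk below. */
	}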

tools/perf/builtin-top.c:

if (annotate_check_args(&top.annotation_opts) < 0)
goto out_delete_evlist;
- if (!top.evlist->core.nr_entries &&
- evlist__add_default(top.evlist) < 0) {
- pr_err("Not enough memory for event selector list\n");
- goto out_delete_evlist;
+ if (!top.evlist->core.nr_entries) {
+ bool can_profile_kernel = perf_event_paranoid_check(1);
+ int err = parse_event(top.evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
+
+ if (err)
+ goto out_delete_evlist;
}
status = evswitch__init(&top.evswitch, top.evlist, stderr);

tools/perf/util/evlist-hybrid.c:

#include <perf/evsel.h>
#include <perf/cpumap.h>
-int evlist__add_default_hybrid(struct evlist *evlist, bool precise)
-{
- struct evsel *evsel;
- struct perf_pmu *pmu;
- __u64 config;
- struct perf_cpu_map *cpus;
-
- perf_pmu__for_each_hybrid_pmu(pmu) {
- config = PERF_COUNT_HW_CPU_CYCLES |
- ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT);
- evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
- config);
- if (!evsel)
- return -ENOMEM;
-
- cpus = perf_cpu_map__get(pmu->cpus);
- evsel->core.cpus = cpus;
- evsel->core.own_cpus = perf_cpu_map__get(cpus);
- evsel->pmu_name = strdup(pmu->name);
- evlist__add(evlist, evsel);
- }
-
- return 0;
-}
-
bool evlist__has_hybrid(struct evlist *evlist)
{
struct evsel *evsel;
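
evlist__add_default_hybrid() existed so that a default cycles event became one event per hybrid core PMU, using the extended hardware-event encoding in which the upper 32 bits of attr.config name the PMU. Going through the parser makes the helper redundant, since parse_events() already expands a hardware event across the hybrid PMUs. The encoding, for reference (PERF_PMU_TYPE_SHIFT is 32 in the perf_event uapi):

	#include <linux/perf_event.h>

	/* Sketch: extended hardware event encoding. With
	 * attr.type == PERF_TYPE_HARDWARE, the high half of attr.config
	 * names the PMU that should service the event. */
	static __u64 hybrid_cycles_config(__u32 pmu_type)
	{
		return PERF_COUNT_HW_CPU_CYCLES |
		       ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
	}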

tools/perf/util/evlist-hybrid.h:

#include "evlist.h"
#include <unistd.h>
-int evlist__add_default_hybrid(struct evlist *evlist, bool precise);
bool evlist__has_hybrid(struct evlist *evlist);
#endif /* __PERF_EVLIST_HYBRID_H */

tools/perf/util/evlist.c:

struct evlist *evlist__new_default(void)
{
struct evlist *evlist = evlist__new();
+ bool can_profile_kernel;
+ int err;
+
+ if (!evlist)
+ return NULL;

- if (evlist && evlist__add_default(evlist)) {
+ can_profile_kernel = perf_event_paranoid_check(1);
+ err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
+ if (err) {
evlist__delete(evlist);
evlist = NULL;
}
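
A second hunk in the same file removes the old helper; the two context lines below are the tail of evlist__set_leader(), which sits directly before __evlist__add_default() in the source: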
perf_evlist__set_leader(&evlist->core);
}
-int __evlist__add_default(struct evlist *evlist, bool precise)
-{
- struct evsel *evsel;
-
- evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
- PERF_COUNT_HW_CPU_CYCLES);
- if (evsel == NULL)
- return -ENOMEM;
-
- evlist__add(evlist, evsel);
- return 0;
-}
-
static struct evsel *evlist__dummy_event(struct evlist *evlist)
{
struct perf_event_attr attr = {
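
Both new call sites use parse_event(), a small helper this patch adds to util/evlist.c (its hunk is not part of this excerpt). Such a wrapper only has to set up and tear down the parser's error state around one parse_events() call; a sketch, assuming the signature the python.c stub below confirms:

	#include "evlist.h"
	#include "parse-events.h"

	/* Sketch: feed one event string through the normal parser. */
	int parse_event(struct evlist *evlist, const char *str)
	{
		struct parse_events_error err;
		int ret;

		parse_events_error__init(&err);
		ret = parse_events(evlist, str, &err);
		parse_events_error__exit(&err);
		return ret;
	}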

tools/perf/util/evlist.h:

void evlist__add(struct evlist *evlist, struct evsel *entry);
void evlist__remove(struct evlist *evlist, struct evsel *evsel);
-int __evlist__add_default(struct evlist *evlist, bool precise);
-
-static inline int evlist__add_default(struct evlist *evlist)
-{
- return __evlist__add_default(evlist, true);
-}
-
int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs);
int __evlist__add_default_attrs(struct evlist *evlist,

tools/perf/util/evsel.c:

return evsel;
}
-static bool perf_event_can_profile_kernel(void)
-{
- return perf_event_paranoid_check(1);
-}
-
-struct evsel *evsel__new_cycles(bool precise __maybe_unused, __u32 type, __u64 config)
-{
- struct perf_event_attr attr = {
- .type = type,
- .config = config,
- .exclude_kernel = !perf_event_can_profile_kernel(),
- };
- struct evsel *evsel;
-
- event_attr_init(&attr);
-
- /*
- * Now let the usual logic to set up the perf_event_attr defaults
- * to kick in when we return and before perf_evsel__open() is called.
- */
- evsel = evsel__new(&attr);
- if (evsel == NULL)
- goto out;
-
- arch_evsel__fixup_new_cycles(&evsel->core.attr);
-
- evsel->precise_max = true;
-
- /* use asprintf() because free(evsel) assumes name is allocated */
- if (asprintf(&evsel->name, "cycles%s%s%.*s",
- (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
- attr.exclude_kernel ? "u" : "",
- attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
- goto error_free;
-out:
- return evsel;
-error_free:
- evsel__delete(evsel);
- evsel = NULL;
- goto out;
-}
-
int copy_config_terms(struct list_head *dst, struct list_head *src)
{
struct evsel_config_term *pos, *tmp;
evsel__set_sample_bit(evsel, WEIGHT);
}
-void __weak arch_evsel__fixup_new_cycles(struct perf_event_attr *attr __maybe_unused)
-{
-}
-
void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
struct perf_event_attr *attr __maybe_unused)
{
}
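
The deleted evsel__new_cycles() had one behavioural knob that survives the rewrite: precise_max, which the ":P" modifier now sets. With precise_max, perf does not pick a precise_ip value up front; the open path probes downward from the most precise level until the kernel accepts the attribute (that fallback also produced the "cycles:ppp"-style names the asprintf() above constructed). A condensed sketch of the probing, assuming a direct perf_event_open() call rather than perf's full fallback logic in evsel__open():

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	/* Sketch: resolve precise_max by probing precision levels. */
	static int open_with_precise_max(struct perf_event_attr *attr,
					 pid_t pid, int cpu)
	{
		for (int p = 3; p >= 0; p--) {
			int fd;

			attr->precise_ip = p;
			fd = syscall(SYS_perf_event_open, attr, pid, cpu,
				     -1, 0UL);
			if (fd >= 0)
				return fd; /* kernel took this level */
		}
		return -1;
	}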

tools/perf/util/evsel.h:

#endif
-struct evsel *evsel__new_cycles(bool precise, __u32 type, __u64 config);
-
#ifdef HAVE_LIBTRACEEVENT
struct tep_event *event_format__new(const char *sys, const char *name);
#endif
void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);
void arch_evsel__set_sample_weight(struct evsel *evsel);
-void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr);
void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr);
int evsel__set_filter(struct evsel *evsel, const char *filter);

tools/perf/util/python.c:

#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif
+/*
+ * Avoid bringing in event parsing.
+ */
+int parse_event(struct evlist *evlist __maybe_unused, const char *str __maybe_unused)
+{
+ return 0;
+}
+
/*
* Provide these two so that we don't have to link against callchain.c and
* start dragging hist.c, etc.
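
The python binding links against only a subset of the perf objects, and evlist__new_default() now calls parse_event(); the stub above satisfies the linker without dragging in the bison/flex event parser. It returns success without parsing anything, so in the binding a "default" evlist simply stays empty.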