					 stat_config.metric_no_threshold,
					 stat_config.user_requested_cpu_list,
					 stat_config.system_wide,
+					 stat_config.hardware_aware_grouping,
					 &stat_config.metric_events);
}
					 stat_config.metric_no_threshold,
					 stat_config.user_requested_cpu_list,
					 stat_config.system_wide,
+					 stat_config.hardware_aware_grouping,
					 &stat_config.metric_events);
}
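
The two hunks above thread the new flag through matching call sites in builtin-stat.c. For orientation, here is a sketch of the kind of option callback they live in; the surrounding lines are an assumption modeled on perf's --metrics handler, not quoted from the patch:

/* Sketch (assumption): the option callback these hunks extend. */
static int parse_metric_groups(const struct option *opt __maybe_unused,
			       const char *str,
			       int unset __maybe_unused)
{
	return metricgroup__parse_groups(evsel_list, /*pmu=*/"all", str,
					 stat_config.metric_no_group,
					 stat_config.metric_no_merge,
					 stat_config.metric_no_threshold,
					 stat_config.user_requested_cpu_list,
					 stat_config.system_wide,
					 stat_config.hardware_aware_grouping,
					 &stat_config.metric_events);
}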
					/*metric_no_threshold=*/true,
					stat_config.user_requested_cpu_list,
					stat_config.system_wide,
+					stat_config.hardware_aware_grouping,
					&stat_config.metric_events) < 0)
			return -1;
	}
					/*metric_no_threshold=*/true,
					stat_config.user_requested_cpu_list,
					stat_config.system_wide,
+					stat_config.hardware_aware_grouping,
					&stat_config.metric_events) < 0)
			return -1;
					  stat_config.metric_no_threshold,
					  stat_config.user_requested_cpu_list,
					  stat_config.system_wide,
+					  stat_config.hardware_aware_grouping,
					  &stat_config.metric_events);
		zfree(&metrics);
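
The remaining builtin-stat.c hunks pass the same flag through the default-metrics paths. A minimal sketch of how the flag itself might be declared and exposed, assuming a boolean field in perf_stat_config and a long option spelled --hardware-grouping (both assumptions, not quoted from this excerpt):

/* Sketch (assumption): plumbing for the flag exercised above. */

/* util/stat.h: carry the knob in perf_stat_config alongside the other
 * metric-grouping flags. */
struct perf_stat_config {
	/* ... existing fields ... */
	bool hardware_aware_grouping;
};

/* builtin-stat.c: expose it as a boolean option; the long-option name
 * and help text here are illustrative. */
OPT_BOOLEAN(0, "hardware-grouping", &stat_config.hardware_aware_grouping,
	    "use hardware-aware grouping to set up metric event groups"),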
			      bool metric_no_threshold,
			      const char *user_requested_cpu_list,
			      bool system_wide,
+			      bool hardware_aware_grouping,
			      struct rblist *metric_events)
{
	const struct pmu_metrics_table *table = pmu_metrics_table__find();

	if (!table)
		return -EINVAL;
+	if (hardware_aware_grouping)
+		pr_debug("Using hardware-aware grouping instead of the traditional metric grouping method\n");

	return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
			    metric_no_threshold, user_requested_cpu_list, system_wide,
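
The diff context cuts the parse_groups() call short here. Note that in this hunk the new flag only emits a pr_debug() message and control still falls through to the traditional path; presumably a companion change dispatches on it. A purely hypothetical shape, where hw_aware_parse_groups() is an invented name used only for illustration:

/* Sketch (assumption): hw_aware_parse_groups() is hypothetical and not
 * shown anywhere in this excerpt. */
if (hardware_aware_grouping)
	return hw_aware_parse_groups(perf_evlist, pmu, str,
				     metric_no_threshold,
				     user_requested_cpu_list,
				     system_wide, metric_events, table);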
			      bool metric_no_threshold,
			      const char *user_requested_cpu_list,
			      bool system_wide,
+			      bool hardware_aware_grouping,
			      struct rblist *metric_events);
int metricgroup__parse_groups_test(struct evlist *evlist,
				   const struct pmu_metrics_table *table,
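
Finally, a hedged usage sketch of the updated entry point from a caller's perspective; the evlist setup, the "all" PMU string, and the metric name are illustrative assumptions, not taken from the patch:

/* Sketch (assumption): calling the updated API; error handling kept
 * minimal. */
struct rblist metric_events;
int ret;

rblist__init(&metric_events);
ret = metricgroup__parse_groups(evlist, /*pmu=*/"all", /*str=*/"TopdownL1",
				/*metric_no_group=*/false,
				/*metric_no_merge=*/false,
				/*metric_no_threshold=*/false,
				/*user_requested_cpu_list=*/NULL,
				/*system_wide=*/false,
				/*hardware_aware_grouping=*/true,
				&metric_events);
if (ret)
	pr_err("failed to set up metric events\n");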