struct filter_entry *pos, *tmp;
list_for_each_entry_safe(pos, tmp, head, list) {
- list_del(&pos->list);
+ list_del_init(&pos->list);
free(pos);
}
}
/* broken lock sequence, discard it */
ls->discard = 1;
bad_hist[BROKEN_ACQUIRE]++;
- list_del(&seq->list);
+ list_del_init(&seq->list);
free(seq);
goto end;
default:
/* broken lock sequence, discard it */
ls->discard = 1;
bad_hist[BROKEN_ACQUIRED]++;
- list_del(&seq->list);
+ list_del_init(&seq->list);
free(seq);
goto end;
default:
/* broken lock sequence, discard it */
ls->discard = 1;
bad_hist[BROKEN_CONTENDED]++;
- list_del(&seq->list);
+ list_del_init(&seq->list);
free(seq);
goto end;
default:
ls->nr_release++;
free_seq:
- list_del(&seq->list);
+ list_del_init(&seq->list);
free(seq);
end:
return 0;
list_for_each_entry_safe(es, next, &arch_std_events, list) {
FOR_ALL_EVENT_STRUCT_FIELDS(FREE_EVENT_FIELD);
- list_del(&es->list);
+ list_del_init(&es->list);
free(es);
}
}
while (!list_empty(events)) {
node = list_entry(events->next, struct event_node, list);
- list_del(&node->list);
+ list_del_init(&node->list);
free(node);
}
}
gtk_container_add(GTK_CONTAINER(window), view);
list_for_each_entry_safe(pos, n, &notes->src->source, al.node) {
- list_del(&pos->al.node);
+ list_del_init(&pos->al.node);
disasm_line__free(pos);
}
return;
}
- list_del(&dl->al.node);
+ list_del_init(&dl->al.node);
disasm_line__free(dl);
}
}
struct annotation_line *al, *n;
list_for_each_entry_safe(al, n, &as->source, node) {
- list_del(&al->node);
+ list_del_init(&al->node);
disasm_line__free(disasm_line(al));
}
}
buffer = list_entry(queues->queue_array[i].head.next,
struct auxtrace_buffer, list);
- list_del(&buffer->list);
+ list_del_init(&buffer->list);
auxtrace_buffer__free(buffer);
}
}
struct auxtrace_index *auxtrace_index, *n;
list_for_each_entry_safe(auxtrace_index, n, head, list) {
- list_del(&auxtrace_index->list);
+ list_del_init(&auxtrace_index->list);
free(auxtrace_index);
}
}
bpf_map_op__delete(struct bpf_map_op *op)
{
if (!list_empty(&op->list))
- list_del(&op->list);
+ list_del_init(&op->list);
if (op->key_type == BPF_MAP_KEY_RANGES)
parse_events__clear_array(&op->k.array);
free(op);
struct call_path_block *pos, *n;
list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
- list_del(&pos->node);
+ list_del_init(&pos->node);
free(pos);
}
free(cpr);
struct callchain_list *call, *tmp;
list_for_each_entry_safe(call, tmp, &new->val, list) {
- list_del(&call->list);
+ list_del_init(&call->list);
map__zput(call->ms.map);
free(call);
}
callchain_cursor_append(cursor, list->ip,
list->ms.map, list->ms.sym,
false, NULL, 0, 0, 0, list->srcline);
- list_del(&list->list);
+ list_del_init(&list->list);
map__zput(list->ms.map);
free(list);
}
struct rb_node *n;
list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
- list_del(&list->list);
+ list_del_init(&list->list);
map__zput(list->ms.map);
free(list);
}
list_for_each_entry_safe(list, tmp, &node->val, list) {
- list_del(&list->list);
+ list_del_init(&list->list);
map__zput(list->ms.map);
free(list);
}
out:
list_for_each_entry_safe(chain, new, &head, list) {
- list_del(&chain->list);
+ list_del_init(&chain->list);
map__zput(chain->ms.map);
free(chain);
}
de = list_entry(dbe->deferred.next, struct deferred_export,
node);
err = dbe->export_comm(dbe, de->comm);
- list_del(&de->node);
+ list_del_init(&de->node);
free(de);
if (err)
return err;
while (!list_empty(&dbe->deferred)) {
de = list_entry(dbe->deferred.next, struct deferred_export,
node);
- list_del(&de->node);
+ list_del_init(&de->node);
free(de);
}
}
static void dso__list_del(struct dso *dso)
{
- list_del(&dso->data.open_entry);
+ list_del_init(&dso->data.open_entry);
WARN_ONCE(dso__data_open_cnt <= 0,
"DSO data fd counter out of bounds.");
dso__data_open_cnt--;
struct perf_evsel_config_term *term, *h;
list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
- list_del(&term->list);
+ list_del_init(&term->list);
free(term);
}
}
list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
- list_del(&fmt->list);
+ list_del_init(&fmt->list);
free(fmt);
}
- list_del(&node->list);
+ list_del_init(&node->list);
free(node);
}
}
if (!list_empty(cache)) {
new = list_entry(cache->next, struct ordered_event, list);
- list_del(&new->list);
+ list_del_init(&new->list);
} else if (oe->buffer) {
new = &oe->buffer->event[oe->buffer_idx];
if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
* yet, we need to free only allocated ones ...
*/
if (oe->buffer) {
- list_del(&oe->buffer->list);
+ list_del_init(&oe->buffer->list);
ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
}
/* ... and continue with the rest */
list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
- list_del(&buffer->list);
+ list_del_init(&buffer->list);
ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
}
}
pr_debug("Failed to add BPF event %s:%s\n",
group, event);
list_for_each_entry_safe(evsel, tmp, &new_evsels, node) {
- list_del(&evsel->node);
+ list_del_init(&evsel->node);
perf_evsel__delete(evsel);
}
return err;
info->metric_expr = alias->metric_expr;
info->metric_name = alias->metric_name;
- list_del(&term->list);
+ list_del_init(&term->list);
free(term);
}
while (!list_empty(blacklist)) {
node = list_first_entry(blacklist,
struct kprobe_blacklist_node, list);
- list_del(&node->list);
+ list_del_init(&node->list);
zfree(&node->symbol);
free(node);
}
*/
if (err) {
sfq->buffer = NULL;
- list_del(&buffer->list);
+ list_del_init(&buffer->list);
auxtrace_buffer__free(buffer);
if (err > 0) /* Buffer done, no error */
err = 0;
static void free_srcfile(struct srcfile *sf)
{
- list_del(&sf->nd);
+ list_del_init(&sf->nd);
hlist_del(&sf->hash_nd);
map_total_sz -= sf->maplen;
munmap(sf->map, sf->maplen);
struct phdr_data *p, *tmp;
list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
- list_del(&p->node);
+ list_del_init(&p->node);
free(p);
}
}
struct sym_data *s, *tmp;
list_for_each_entry_safe(s, tmp, &kci->syms, node) {
- list_del(&s->node);
+ list_del_init(&s->node);
free(s);
}
}
int nr_free = 0;
list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
- list_del(&pos->note_list);
+ list_del_init(&pos->note_list);
zfree(&pos->name);
zfree(&pos->provider);
free(pos);
down_write(&thread->namespaces_lock);
list_for_each_entry_safe(namespaces, tmp_namespaces,
&thread->namespaces_list, list) {
- list_del(&namespaces->list);
+ list_del_init(&namespaces->list);
namespaces__free(namespaces);
}
up_write(&thread->namespaces_lock);
down_write(&thread->comm_lock);
list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
- list_del(&comm->list);
+ list_del_init(&comm->list);
comm__free(comm);
}
up_write(&thread->comm_lock);
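
Note on the pattern above: every hunk makes the same mechanical substitution, swapping list_del() for list_del_init() right before the entry is freed or may be inspected again. list_del() only unlinks the node and leaves its next/prev pointers stale, so a later list_empty() check on that node is misleading and a second deletion can corrupt the neighbouring entries; list_del_init() relinks the node to itself, so list_empty() becomes true and a repeated deletion is a harmless no-op, which is what makes guards like the one in bpf_map_op__delete() above reliable. The stand-alone sketch below is not the tools/include/linux/list.h code, only a minimal re-implementation of the same semantics for illustration:

/*
 * Minimal userspace sketch of the list_del() vs. list_del_init()
 * difference.  It re-creates just enough of the kernel-style list API
 * to run on its own; the helpers here are simplified stand-ins, not
 * the perf tools implementation.
 */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void __list_del_entry(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* list_del(): unlink only - the entry keeps stale next/prev pointers */
static void list_del(struct list_head *entry)
{
	__list_del_entry(entry);
}

/* list_del_init(): unlink, then point the entry back at itself */
static void list_del_init(struct list_head *entry)
{
	__list_del_entry(entry);
	INIT_LIST_HEAD(entry);
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct list_head a, b;

	list_add(&a, &head);
	list_add(&b, &head);

	list_del(&a);
	/* Stale links: the node still looks list-resident. */
	printf("after list_del:      list_empty(&a) = %d\n", list_empty(&a));

	list_del_init(&b);
	/* Self-linked: list_empty() is true and a second del is a no-op. */
	printf("after list_del_init: list_empty(&b) = %d\n", list_empty(&b));
	list_del_init(&b);	/* safe to repeat */

	return 0;
}

Built with a plain cc invocation, the first printf reports 0 (the list_del()'d node still appears linked) and the second reports 1, which is the property the conversions above rely on.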