 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
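+/*
+ * Free what perf_event_alloc() set up: callchain buffers, pmu-specific
+ * state, the context reference and, after an RCU grace period, the
+ * event itself. Unlike free_event(), this does not undo any accounting,
+ * so it is safe on paths where the event was never accounted.
+ */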
+static void __free_event(struct perf_event *event)
+{
+       if (!event->parent) {
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       put_callchain_buffers();
+       }
+
+       if (event->destroy)
+               event->destroy(event);
+
+       if (event->ctx)
+               put_ctx(event->ctx);
+
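+       /* the event itself is freed once an RCU grace period has elapsed */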
+       call_rcu(&event->rcu_head, free_event_rcu);
+}
+
 static void free_event(struct perf_event *event)
 {
        irq_work_sync(&event->pending);
                        atomic_dec(&nr_comm_events);
                if (event->attr.task)
                        atomic_dec(&nr_task_events);
-               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-                       put_callchain_buffers();
                if (is_cgroup_event(event)) {
                        atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
                        static_key_slow_dec_deferred(&perf_sched_events);
        if (is_cgroup_event(event))
                perf_detach_cgroup(event);
 
-       if (event->destroy)
-               event->destroy(event);
-
-       if (event->ctx)
-               put_ctx(event->ctx);
-
-       call_rcu(&event->rcu_head, free_event_rcu);
+       __free_event(event);
 }
 
 int perf_event_release_kernel(struct perf_event *event)
        return pmu;
 }
 
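+/*
+ * Mirror image of the unaccounting in free_event(): bump the static keys
+ * and per-feature counters that keep the scheduler hooks and side-band
+ * events enabled for as long as at least one event needs them.
+ */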
+static void account_event(struct perf_event *event)
+{
+       if (event->attach_state & PERF_ATTACH_TASK)
+               static_key_slow_inc(&perf_sched_events.key);
+       if (event->attr.mmap || event->attr.mmap_data)
+               atomic_inc(&nr_mmap_events);
+       if (event->attr.comm)
+               atomic_inc(&nr_comm_events);
+       if (event->attr.task)
+               atomic_inc(&nr_task_events);
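+       /*
+        * Branch stack sampling needs the context switch hook; CPU-bound
+        * events are counted per CPU as well, so the branch stack flush
+        * only runs on CPUs that actually have such an event.
+        */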
+       if (has_branch_stack(event)) {
+               static_key_slow_inc(&perf_sched_events.key);
+               if (!(event->attach_state & PERF_ATTACH_TASK))
+                       atomic_inc(&per_cpu(perf_branch_stack_events,
+                                           event->cpu));
+       }
+
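+       /*
+        * one more event:
+        * - that has cgroup constraint on event->cpu
+        * - that may need work on context switch
+        */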
+       if (is_cgroup_event(event)) {
+               atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+               static_key_slow_inc(&perf_sched_events.key);
+       }
+}
+
 /*
  * Allocate and initialize an event structure
  */
                        if (err)
                                goto err_pmu;
                }
-
-               if (event->attach_state & PERF_ATTACH_TASK)
-                       static_key_slow_inc(&perf_sched_events.key);
-               if (event->attr.mmap || event->attr.mmap_data)
-                       atomic_inc(&nr_mmap_events);
-               if (event->attr.comm)
-                       atomic_inc(&nr_comm_events);
-               if (event->attr.task)
-                       atomic_inc(&nr_task_events);
-               if (has_branch_stack(event)) {
-                       static_key_slow_inc(&perf_sched_events.key);
-                       if (!(event->attach_state & PERF_ATTACH_TASK))
-                               atomic_inc(&per_cpu(perf_branch_stack_events,
-                                                   event->cpu));
-               }
        }
 
        return event;
 
        if (flags & PERF_FLAG_PID_CGROUP) {
                err = perf_cgroup_connect(pid, event, &attr, group_leader);
-               if (err)
-                       goto err_alloc;
-               /*
-                * one more event:
-                * - that has cgroup constraint on event->cpu
-                * - that may need work on context switch
-                */
-               atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-               static_key_slow_inc(&perf_sched_events.key);
+               if (err) {
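+                       /*
+                        * The event has not been accounted yet:
+                        * __free_event() is enough, free_event() would
+                        * also unaccount it.
+                        */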
+                       __free_event(event);
+                       goto err_task;
+               }
        }
 
+       account_event(event);
+
        /*
         * Special case software events and allow them to be part of
         * any hardware group.
                goto err;
        }
 
+       account_event(event);
+
        ctx = find_get_context(event->pmu, task, cpu);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);