struct bpf_prog *xdp_prog);
 
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
-void __cpu_map_flush(struct bpf_map *map);
+void __cpu_map_flush(void);
 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
 
        return NULL;
 }
 
-static inline void __cpu_map_flush(struct bpf_map *map)
+static inline void __cpu_map_flush(void)
 {
 }
 
 
        struct bpf_map map;
        /* Below members specific for map type */
        struct bpf_cpu_map_entry **cpu_map;
-       struct list_head __percpu *flush_list;
 };
 
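+/* Global per-CPU flush list, shared by all cpumap instances: bulk queues
+ * with pending frames stay on this list until __cpu_map_flush() drains
+ * them on the same CPU.
+ */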
+static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
+
 static int bq_flush_to_queue(struct xdp_bulk_queue *bq);
 
 static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 {
        struct bpf_cpu_map *cmap;
        int err = -ENOMEM;
-       int ret, cpu;
        u64 cost;
+       int ret;
 
        if (!capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);
 
        /* make sure page count doesn't overflow */
        cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
-       cost += sizeof(struct list_head) * num_possible_cpus();
 
        /* Notice returns -EPERM on if map size is larger than memlock limit */
        ret = bpf_map_charge_init(&cmap->map.memory, cost);
                goto free_cmap;
        }
 
-       cmap->flush_list = alloc_percpu(struct list_head);
-       if (!cmap->flush_list)
-               goto free_charge;
-
-       for_each_possible_cpu(cpu)
-               INIT_LIST_HEAD(per_cpu_ptr(cmap->flush_list, cpu));
-
        /* Alloc array for possible remote "destination" CPUs */
        cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
                                           sizeof(struct bpf_cpu_map_entry *),
                                           cmap->map.numa_node);
        if (!cmap->cpu_map)
-               goto free_percpu;
+               goto free_charge;
 
        return &cmap->map;
-free_percpu:
-       free_percpu(cmap->flush_list);
 free_charge:
        bpf_map_charge_finish(&cmap->map.memory);
 free_cmap:
                /* bq flush and cleanup happens after RCU grace-period */
                __cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
        }
-       free_percpu(cmap->flush_list);
        bpf_map_area_free(cmap->cpu_map);
        kfree(cmap);
 }
  */
 static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 {
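+       /* Bulk queues with pending frames are tracked on the global
+        * per-CPU flush list instead of a per-map list.
+        */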
-       struct list_head *flush_list = this_cpu_ptr(rcpu->cmap->flush_list);
+       struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
        struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
        if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
        return 0;
 }
 
-void __cpu_map_flush(struct bpf_map *map)
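+/* Drain every bulk queue queued on this CPU, regardless of which cpumap
+ * instance the entries belong to.
+ */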
+void __cpu_map_flush(void)
 {
-       struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
-       struct list_head *flush_list = this_cpu_ptr(cmap->flush_list);
+       struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
        struct xdp_bulk_queue *bq, *tmp;
 
        list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
                wake_up_process(bq->obj->kthread);
        }
 }
+
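+/* The per-CPU flush lists are no longer allocated per map in
+ * cpu_map_alloc(), so initialize them once at boot.
+ */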
+static int __init cpu_map_init(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
+       return 0;
+}
+
+subsys_initcall(cpu_map_init);