#endif
 }
 
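+/*
+ * free_masks - release the cpumasks allocated by alloc_masks()
+ */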
+static void free_masks(struct irq_desc *desc)
+{
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       free_cpumask_var(desc->pending_mask);
+#endif
+       free_cpumask_var(desc->irq_common_data.affinity);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+       free_cpumask_var(desc->irq_common_data.effective_affinity);
+#endif
+}
+
 #else
 static inline int
 alloc_masks(struct irq_desc *desc, int node) { return 0; }
 static inline void
 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
+static inline void free_masks(struct irq_desc *desc) { }
 #endif
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
        mas_erase(&mas);
 }
 
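+/*
+ * irq_kobj_type is defined further down, in the CONFIG_SPARSE_IRQ section;
+ * forward declare it here so init_desc() can hand it to kobject_init().
+ */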
+#ifdef CONFIG_SPARSE_IRQ
+static const struct kobj_type irq_kobj_type;
+#endif
+
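+/*
+ * init_desc - common initialization of an interrupt descriptor
+ * @desc:      descriptor to initialize
+ * @irq:       interrupt number
+ * @node:      NUMA node to allocate the cpumasks on
+ * @flags:     flags to set in the descriptor's irq_data
+ * @affinity:  default affinity, NULL for the system default
+ * @owner:     owning module, NULL for core code
+ *
+ * Shared by early_irq_init() and alloc_desc(). Returns 0 on success or
+ * -ENOMEM; on failure no partial allocations are left behind.
+ */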
+static int init_desc(struct irq_desc *desc, int irq, int node,
+                    unsigned int flags,
+                    const struct cpumask *affinity,
+                    struct module *owner)
+{
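+       /* Per-CPU interrupt statistics, allocated based on nr_cpu_ids */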
+       desc->kstat_irqs = alloc_percpu(unsigned int);
+       if (!desc->kstat_irqs)
+               return -ENOMEM;
+
+       if (alloc_masks(desc, node)) {
+               free_percpu(desc->kstat_irqs);
+               return -ENOMEM;
+       }
+
+       raw_spin_lock_init(&desc->lock);
+       lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+       mutex_init(&desc->request_mutex);
+       init_waitqueue_head(&desc->wait_for_threads);
+       desc_set_defaults(irq, desc, node, affinity, owner);
+       irqd_set(&desc->irq_data, flags);
+       irq_resend_init(desc);
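+       /* The kobject and the RCU head only exist in the sparse descriptor layout */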
+#ifdef CONFIG_SPARSE_IRQ
+       kobject_init(&desc->kobj, &irq_kobj_type);
+       init_rcu_head(&desc->rcu);
+#endif
+
+       return 0;
+}
+
 #ifdef CONFIG_SPARSE_IRQ
 
 static void irq_kobj_release(struct kobject *kobj);
 EXPORT_SYMBOL_GPL(irq_to_desc);
 #endif
 
-#ifdef CONFIG_SMP
-static void free_masks(struct irq_desc *desc)
-{
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-       free_cpumask_var(desc->pending_mask);
-#endif
-       free_cpumask_var(desc->irq_common_data.affinity);
-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
-       free_cpumask_var(desc->irq_common_data.effective_affinity);
-#endif
-}
-#else
-static inline void free_masks(struct irq_desc *desc) { }
-#endif
-
 void irq_lock_sparse(void)
 {
        mutex_lock(&sparse_irq_lock);
                                   struct module *owner)
 {
        struct irq_desc *desc;
+       int ret;
 
        desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
        if (!desc)
                return NULL;
-       /* allocate based on nr_cpu_ids */
-       desc->kstat_irqs = alloc_percpu(unsigned int);
-       if (!desc->kstat_irqs)
-               goto err_desc;
-
-       if (alloc_masks(desc, node))
-               goto err_kstat;
 
-       raw_spin_lock_init(&desc->lock);
-       lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-       mutex_init(&desc->request_mutex);
-       init_rcu_head(&desc->rcu);
-       init_waitqueue_head(&desc->wait_for_threads);
-
-       desc_set_defaults(irq, desc, node, affinity, owner);
-       irqd_set(&desc->irq_data, flags);
-       kobject_init(&desc->kobj, &irq_kobj_type);
-       irq_resend_init(desc);
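+       /*
+        * init_desc() cleans up its partial allocations itself on failure,
+        * so only the descriptor is left to free here.
+        */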
+       ret = init_desc(desc, irq, node, flags, affinity, owner);
+       if (unlikely(ret)) {
+               kfree(desc);
+               return NULL;
+       }
 
        return desc;
-
-err_kstat:
-       free_percpu(desc->kstat_irqs);
-err_desc:
-       kfree(desc);
-       return NULL;
 }
 
 static void irq_kobj_release(struct kobject *kobj)
 int __init early_irq_init(void)
 {
        int count, i, node = first_online_node;
-       struct irq_desc *desc;
+       int ret;
 
        init_irq_default_affinity();
 
        printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
 
-       desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);
 
        for (i = 0; i < count; i++) {
-               desc[i].kstat_irqs = alloc_percpu(unsigned int);
-               alloc_masks(&desc[i], node);
-               raw_spin_lock_init(&desc[i].lock);
-               lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-               mutex_init(&desc[i].request_mutex);
-               init_waitqueue_head(&desc[i].wait_for_threads);
-               desc_set_defaults(i, &desc[i], node, NULL, NULL);
-               irq_resend_init(&desc[i]);
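+               /* Initialize each statically allocated descriptor in place */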
+               ret = init_desc(irq_desc + i, i, node, 0, NULL, NULL);
+               if (unlikely(ret))
+                       goto __free_desc_res;
        }
+
        return arch_early_irq_init();
+
+__free_desc_res:
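+       /* Unwind the descriptors that were already initialized */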
+       while (--i >= 0) {
+               free_masks(irq_desc + i);
+               free_percpu(irq_desc[i].kstat_irqs);
+       }
+
+       return ret;
 }
 
 struct irq_desc *irq_to_desc(unsigned int irq)