                        continue;
                count++;
                cursor = curr->next;
-               debug_spin_lock_restore(&debug_mutex_lock, flags);
+               debug_spin_unlock_restore(&debug_mutex_lock, flags);
 
                printk("\n#%03d:            ", count);
                printk_lock(lock, filter ? 0 : 1);
                goto next;
        }
-       debug_spin_lock_restore(&debug_mutex_lock, flags);
+       debug_spin_unlock_restore(&debug_mutex_lock, flags);
        printk("\n");
 }
 
                        continue;
                list_del_init(curr);
                DEBUG_OFF();
-               debug_spin_lock_restore(&debug_mutex_lock, flags);
+               debug_spin_unlock_restore(&debug_mutex_lock, flags);
 
                printk("BUG: %s/%d, lock held at task exit time!\n",
                        task->comm, task->pid);
                        printk("exiting task is not even the owner??\n");
                return;
        }
-       debug_spin_lock_restore(&debug_mutex_lock, flags);
+       debug_spin_unlock_restore(&debug_mutex_lock, flags);
 }
 
 /*
                        continue;
                list_del_init(curr);
                DEBUG_OFF();
-               debug_spin_lock_restore(&debug_mutex_lock, flags);
+               debug_spin_unlock_restore(&debug_mutex_lock, flags);
 
                printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
                        current->comm, current->pid, lock, from, to);
                        printk("freeing task is not even the owner??\n");
                return;
        }
-       debug_spin_lock_restore(&debug_mutex_lock, flags);
+       debug_spin_unlock_restore(&debug_mutex_lock, flags);
 }
 
 /*
 
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name);
 
-#define debug_spin_lock(lock)                          \
-       do {                                            \
-               local_irq_disable();                    \
-               if (debug_mutex_on)                     \
-                       spin_lock(lock);                \
-       } while (0)
-
-#define debug_spin_unlock(lock)                                \
-       do {                                            \
-               if (debug_mutex_on)                     \
-                       spin_unlock(lock);              \
-               local_irq_enable();                     \
-               preempt_check_resched();                \
-       } while (0)
-
 #define debug_spin_lock_save(lock, flags)              \
        do {                                            \
                local_irq_save(flags);                  \
                if (debug_mutex_on)                     \
                        spin_lock(lock);                \
        } while (0)
 
-#define debug_spin_lock_restore(lock, flags)           \
+#define debug_spin_unlock_restore(lock, flags)         \
        do {                                            \
                if (debug_mutex_on)                     \
                        spin_unlock(lock);              \
                local_irq_restore(flags);               \
                preempt_check_resched();                \
        } while (0)
 
-#define spin_lock_mutex(lock)                          \
+#define spin_lock_mutex(lock, flags)                   \
        do {                                            \
                struct mutex *l = container_of(lock, struct mutex, wait_lock); \
                                                        \
                DEBUG_WARN_ON(in_interrupt());          \
-               debug_spin_lock(&debug_mutex_lock);     \
+               debug_spin_lock_save(&debug_mutex_lock, flags); \
                spin_lock(lock);                        \
                DEBUG_WARN_ON(l->magic != l);           \
        } while (0)
 
-#define spin_unlock_mutex(lock)                                \
+#define spin_unlock_mutex(lock, flags)                 \
        do {                                            \
                spin_unlock(lock);                      \
-               debug_spin_unlock(&debug_mutex_lock);   \
+               debug_spin_unlock_restore(&debug_mutex_lock, flags);    \
        } while (0)
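
(Usage sketch, not part of the patch: with the flags-passing form, each caller
declares its own "unsigned long flags" and threads it through the lock/unlock
pair, exactly as the mutex.c hunks further down do.  The function name below is
illustrative and assumes CONFIG_DEBUG_MUTEXES is enabled.)

/*
 * Illustrative caller of the two-argument macros: spin_lock_mutex()
 * saves the interrupt state into flags and disables local interrupts
 * via debug_spin_lock_save(); spin_unlock_mutex() restores it via
 * debug_spin_unlock_restore().
 */
static void example_take_wait_lock(struct mutex *lock)
{
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        /* ... lock->wait_lock held, local interrupts disabled ... */
        spin_unlock_mutex(&lock->wait_lock, flags);
}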
 
 #define DEBUG_OFF()                                    \
 
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned int old_val;
+       unsigned long flags;
 
        debug_mutex_init_waiter(&waiter);
 
-       spin_lock_mutex(&lock->wait_lock);
+       spin_lock_mutex(&lock->wait_lock, flags);
 
        debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
 
                if (unlikely(state == TASK_INTERRUPTIBLE &&
                                                signal_pending(task))) {
                        mutex_remove_waiter(lock, &waiter, task->thread_info);
-                       spin_unlock_mutex(&lock->wait_lock);
+                       spin_unlock_mutex(&lock->wait_lock, flags);
 
                        debug_mutex_free_waiter(&waiter);
                        return -EINTR;
                __set_task_state(task, state);
 
                /* didn't get the lock, go to sleep: */
-               spin_unlock_mutex(&lock->wait_lock);
+               spin_unlock_mutex(&lock->wait_lock, flags);
                schedule();
-               spin_lock_mutex(&lock->wait_lock);
+               spin_lock_mutex(&lock->wait_lock, flags);
        }
 
        /* got the lock - rejoice! */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
 
-       spin_unlock_mutex(&lock->wait_lock);
+       spin_unlock_mutex(&lock->wait_lock, flags);
 
        debug_mutex_free_waiter(&waiter);
 
 __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 {
        struct mutex *lock = container_of(lock_count, struct mutex, count);
+       unsigned long flags;
 
        DEBUG_WARN_ON(lock->owner != current_thread_info());
 
-       spin_lock_mutex(&lock->wait_lock);
+       spin_lock_mutex(&lock->wait_lock, flags);
 
        /*
         * some architectures leave the lock unlocked in the fastpath failure
 
        debug_mutex_clear_owner(lock);
 
-       spin_unlock_mutex(&lock->wait_lock);
+       spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
 /*
 static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 {
        struct mutex *lock = container_of(lock_count, struct mutex, count);
+       unsigned long flags;
        int prev;
 
-       spin_lock_mutex(&lock->wait_lock);
+       spin_lock_mutex(&lock->wait_lock, flags);
 
        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1))
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
 
-       spin_unlock_mutex(&lock->wait_lock);
+       spin_unlock_mutex(&lock->wait_lock, flags);
 
        return prev == 1;
 }
 
  * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
  */
 
-#define spin_lock_mutex(lock)                  spin_lock(lock)
-#define spin_unlock_mutex(lock)                        spin_unlock(lock)
+#define spin_lock_mutex(lock, flags) \
+               do { spin_lock(lock); (void)(flags); } while (0)
+#define spin_unlock_mutex(lock, flags) \
+               do { spin_unlock(lock); (void)(flags); } while (0)
 #define mutex_remove_waiter(lock, waiter, ti) \
                __list_del((waiter)->list.prev, (waiter)->list.next)
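
(Editorial note, not part of the patch: the (void)(flags) in the stub macros
keeps the two-argument signature of the debug variants while still referencing
flags, so callers that declare "unsigned long flags" purely for this API do not
trigger unused-variable warnings when mutex debugging is disabled.  Roughly,
with CONFIG_DEBUG_MUTEXES off a call site expands as sketched below; the lock
name is illustrative.)

        unsigned long flags;

        /* spin_lock_mutex(&lock->wait_lock, flags); expands to: */
        do { spin_lock(&lock->wait_lock); (void)(flags); } while (0);

        /* spin_unlock_mutex(&lock->wait_lock, flags); expands to: */
        do { spin_unlock(&lock->wait_lock); (void)(flags); } while (0);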