usage[i] = '\0';
 }
 
-static void __print_lock_name(struct lock_class *class)
+static void __print_lock_name(struct held_lock *hlock, struct lock_class *class)
 {
        char str[KSYM_NAME_LEN];
        const char *name;
                        printk(KERN_CONT "#%d", class->name_version);
                if (class->subclass)
                        printk(KERN_CONT "/%d", class->subclass);
+               if (hlock && class->print_fn)
+                       class->print_fn(hlock->instance);
        }
 }
 
-static void print_lock_name(struct lock_class *class)
+static void print_lock_name(struct held_lock *hlock, struct lock_class *class)
 {
        char usage[LOCK_USAGE_CHARS];
 
        get_usage_chars(class, usage);
 
        printk(KERN_CONT " (");
-       __print_lock_name(class);
+       __print_lock_name(hlock, class);
        printk(KERN_CONT "){%s}-{%d:%d}", usage,
                        class->wait_type_outer ?: class->wait_type_inner,
                        class->wait_type_inner);
        }
 
        printk(KERN_CONT "%px", hlock->instance);
-       print_lock_name(lock);
+       print_lock_name(hlock, lock);
        printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
 }
 
        if (debug_locks_silent)
                return;
        printk("\n-> #%u", depth);
-       print_lock_name(target->class);
+       print_lock_name(NULL, target->class);
        printk(KERN_CONT ":\n");
        print_lock_trace(target->trace, 6);
 }
         */
        if (parent != source) {
                printk("Chain exists of:\n  ");
-               __print_lock_name(source);
+               __print_lock_name(src, source);
                printk(KERN_CONT " --> ");
-               __print_lock_name(parent);
+               __print_lock_name(NULL, parent);
                printk(KERN_CONT " --> ");
-               __print_lock_name(target);
+               __print_lock_name(tgt, target);
                printk(KERN_CONT "\n\n");
        }
 
                printk("  rlock(");
        else
                printk("  lock(");
-       __print_lock_name(target);
+       __print_lock_name(tgt, target);
        printk(KERN_CONT ");\n");
        printk("                               lock(");
-       __print_lock_name(parent);
+       __print_lock_name(NULL, parent);
        printk(KERN_CONT ");\n");
        printk("                               lock(");
-       __print_lock_name(target);
+       __print_lock_name(tgt, target);
        printk(KERN_CONT ");\n");
        if (src_read != 0)
                printk("  rlock(");
                printk("  sync(");
        else
                printk("  lock(");
-       __print_lock_name(source);
+       __print_lock_name(src, source);
        printk(KERN_CONT ");\n");
        printk("\n *** DEADLOCK ***\n\n");
 }
        return ret;
 }
 
+/*
+ * Forward declaration: print_deadlock_bug() is defined further down but is
+ * now also needed by the dependency-cycle check below, which reports a
+ * cycle between two locks of the same class (src->class_idx ==
+ * target->class_idx) as a deadlock rather than a circular dependency.
+ */
+static void print_deadlock_bug(struct task_struct *, struct held_lock *, struct held_lock *);
+
 /*
  * Prove that the dependency graph starting at <src> can not
  * lead to <target>. If it can, there is a circle when adding
                        *trace = save_trace();
                }
 
-               print_circular_bug(&src_entry, target_entry, src, target);
+               if (src->class_idx == target->class_idx)
+                       print_deadlock_bug(current, src, target);
+               else
+                       print_circular_bug(&src_entry, target_entry, src, target);
        }
 
        return ret;
        int bit;
 
        printk("%*s->", depth, "");
-       print_lock_name(class);
+       print_lock_name(NULL, class);
 #ifdef CONFIG_DEBUG_LOCKDEP
        printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
 #endif
         */
        if (middle_class != unsafe_class) {
                printk("Chain exists of:\n  ");
-               __print_lock_name(safe_class);
+               __print_lock_name(NULL, safe_class);
                printk(KERN_CONT " --> ");
-               __print_lock_name(middle_class);
+               __print_lock_name(NULL, middle_class);
                printk(KERN_CONT " --> ");
-               __print_lock_name(unsafe_class);
+               __print_lock_name(NULL, unsafe_class);
                printk(KERN_CONT "\n\n");
        }
 
        printk("       CPU0                    CPU1\n");
        printk("       ----                    ----\n");
        printk("  lock(");
-       __print_lock_name(unsafe_class);
+       __print_lock_name(NULL, unsafe_class);
        printk(KERN_CONT ");\n");
        printk("                               local_irq_disable();\n");
        printk("                               lock(");
-       __print_lock_name(safe_class);
+       __print_lock_name(NULL, safe_class);
        printk(KERN_CONT ");\n");
        printk("                               lock(");
-       __print_lock_name(middle_class);
+       __print_lock_name(NULL, middle_class);
        printk(KERN_CONT ");\n");
        printk("  <Interrupt>\n");
        printk("    lock(");
-       __print_lock_name(safe_class);
+       __print_lock_name(NULL, safe_class);
        printk(KERN_CONT ");\n");
        printk("\n *** DEADLOCK ***\n\n");
 }
        pr_warn("\nand this task is already holding:\n");
        print_lock(prev);
        pr_warn("which would create a new lock dependency:\n");
-       print_lock_name(hlock_class(prev));
+       print_lock_name(prev, hlock_class(prev));
        pr_cont(" ->");
-       print_lock_name(hlock_class(next));
+       print_lock_name(next, hlock_class(next));
        pr_cont("\n");
 
        pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
                irqclass);
-       print_lock_name(backwards_entry->class);
+       print_lock_name(NULL, backwards_entry->class);
        pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
        print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
 
        pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
-       print_lock_name(forwards_entry->class);
+       print_lock_name(NULL, forwards_entry->class);
        pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
        pr_warn("...");
 
        printk("       CPU0\n");
        printk("       ----\n");
        printk("  lock(");
-       __print_lock_name(prev);
+       __print_lock_name(prv, prev);
        printk(KERN_CONT ");\n");
        printk("  lock(");
-       __print_lock_name(next);
+       __print_lock_name(nxt, next);
        printk(KERN_CONT ");\n");
        printk("\n *** DEADLOCK ***\n\n");
        printk(" May be due to missing lock nesting notation\n\n");
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
                   struct held_lock *next)
 {
+       struct lock_class *class = hlock_class(prev);
+
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return;
 
        pr_warn("\nbut task is already holding lock:\n");
        print_lock(prev);
 
+       if (class->cmp_fn) {
+               pr_warn("and the lock comparison function returns %i:\n",
+                       class->cmp_fn(prev->instance, next->instance));
+       }
+
        pr_warn("\nother info that might help us debug this:\n");
        print_deadlock_scenario(next, prev);
        lockdep_print_held_locks(curr);
 static int
 check_deadlock(struct task_struct *curr, struct held_lock *next)
 {
+       struct lock_class *class;
        struct held_lock *prev;
        struct held_lock *nest = NULL;
        int i;
                if ((next->read == 2) && prev->read)
                        continue;
 
+               class = hlock_class(prev);
+
+               if (class->cmp_fn &&
+                   class->cmp_fn(prev->instance, next->instance) < 0)
+                       continue;
+
                /*
                 * We're holding the nest_lock, which serializes this lock's
                 * nesting behaviour.
                return 2;
        }
 
+       if (prev->class_idx == next->class_idx) {
+               struct lock_class *class = hlock_class(prev);
+
+               if (class->cmp_fn &&
+                   class->cmp_fn(prev->instance, next->instance) < 0)
+                       return 2;
+       }
+
        /*
         * Prove that the new <prev> -> <next> dependency would not
         * create a circular dependency in the graph. (We do this by
                hlock_id = chain_hlocks[chain->base + i];
                chain_key = print_chain_key_iteration(hlock_id, chain_key);
 
-               print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
+               print_lock_name(NULL, lock_classes + chain_hlock_class_idx(hlock_id));
                printk("\n");
        }
 }
        printk("       CPU0\n");
        printk("       ----\n");
        printk("  lock(");
-       __print_lock_name(class);
+       __print_lock_name(lock, class);
        printk(KERN_CONT ");\n");
        printk("  <Interrupt>\n");
        printk("    lock(");
-       __print_lock_name(class);
+       __print_lock_name(lock, class);
        printk(KERN_CONT ");\n");
        printk("\n *** DEADLOCK ***\n\n");
 }
                pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
        else
                pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
-       print_lock_name(other->class);
+       print_lock_name(NULL, other->class);
        pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
        pr_warn("\nother info that might help us debug this:\n");
 struct lock_class_key __lockdep_no_validate__;
 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
 
+#ifdef CONFIG_PROVE_LOCKING
+/*
+ * lockdep_set_lock_cmp_fn - install per-class nesting-order callbacks on @lock
+ *
+ * Attaches @cmp_fn (instance ordering comparator) and @print_fn (instance
+ * printer) to @lock's lock_class, registering the class on the spot if it
+ * has not been cached yet.  The callbacks live on the class, not the
+ * instance, so every lock of the class must agree on them: a WARN fires if
+ * a different callback was already installed.
+ *
+ * Runs with IRQs disabled and the lockdep recursion marker held so that
+ * the class registration below is not itself tracked by lockdep.
+ */
+void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn cmp_fn,
+                            lock_print_fn print_fn)
+{
+       struct lock_class *class = lock->class_cache[0];
+       unsigned long flags;
+
+       raw_local_irq_save(flags);
+       lockdep_recursion_inc();
+
+       /* No cached class yet: register one for this lockdep_map now. */
+       if (!class)
+               class = register_lock_class(lock, 0, 0);
+
+       /* class may still be NULL if registration failed (e.g. table full). */
+       if (class) {
+               WARN_ON(class->cmp_fn   && class->cmp_fn != cmp_fn);
+               WARN_ON(class->print_fn && class->print_fn != print_fn);
+
+               class->cmp_fn   = cmp_fn;
+               class->print_fn = print_fn;
+       }
+
+       lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lockdep_set_lock_cmp_fn);
+#endif
+
 static void
 print_lock_nested_lock_not_held(struct task_struct *curr,
                                struct held_lock *hlock)