void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
 {
+       trace_btrfs_set_lock_blocking_read(eb);
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
 
 void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
 {
+       trace_btrfs_set_lock_blocking_write(eb);
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
 
 void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
 {
+       trace_btrfs_clear_lock_blocking_read(eb);
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
 
 void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
 {
+       trace_btrfs_clear_lock_blocking_write(eb);
        /*
         * no lock is required.  The lock owner may change if
         * we have a read lock, but it won't change to or away
        }
        btrfs_assert_tree_read_locks_get(eb);
        btrfs_assert_spinning_readers_get(eb);
+       trace_btrfs_tree_read_lock_atomic(eb);
        return 1;
 }
 
        }
        btrfs_assert_tree_read_locks_get(eb);
        btrfs_assert_spinning_readers_get(eb);
+       trace_btrfs_try_tree_read_lock(eb);
        return 1;
 }
 
        btrfs_assert_tree_write_locks_get(eb);
        btrfs_assert_spinning_writers_get(eb);
        eb->lock_owner = current->pid;
+       trace_btrfs_try_tree_write_lock(eb);
        return 1;
 }
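Note that the hooks in the three lock-acquisition helpers above sit on the success path only: after the reader/writer accounting, immediately before returning 1, so a failed trylock emits nothing. A minimal sketch of that placement, using a hypothetical helper and event name rather than anything from this patch:

static int btrfs_try_tree_read_lock_example(struct extent_buffer *eb)
{
	if (!example_trylock(eb))		/* hypothetical trylock helper */
		return 0;			/* lock not taken: no event */

	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_try_tree_read_lock_example(eb);	/* hypothetical event, fires only once the lock is held */
	return 1;
}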
 
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
+       trace_btrfs_tree_read_unlock(eb);
        /*
         * if we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
+       trace_btrfs_tree_read_unlock_blocking(eb);
        /*
         * if we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
        BUG_ON(blockers > 1);
 
        btrfs_assert_tree_locked(eb);
+       trace_btrfs_tree_unlock(eb);
        eb->lock_owner = 0;
        btrfs_assert_tree_write_locks_put(eb);
 
 
        TP_ARGS(eb, start_ns)
 );
 
+DECLARE_EVENT_CLASS(btrfs_locking_events,
+       TP_PROTO(const struct extent_buffer *eb),
+
+       TP_ARGS(eb),
+
+       TP_STRUCT__entry_btrfs(
+               __field(        u64,    block           )
+               __field(        u64,    generation      )
+               __field(        u64,    owner           )
+               __field(        int,    is_log_tree     )
+       ),
+
+       TP_fast_assign_btrfs(eb->fs_info,
+               __entry->block          = eb->start;
+               __entry->generation     = btrfs_header_generation(eb);
+               __entry->owner          = btrfs_header_owner(eb);
+               __entry->is_log_tree    = (eb->log_index >= 0);
+       ),
+
+       TP_printk_btrfs("block=%llu generation=%llu owner=%llu is_log_tree=%d",
+               __entry->block, __entry->generation,
+               __entry->owner, __entry->is_log_tree)
+);
+
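+/*
+ * Illustrative only: a single line from any event in this class renders
+ * roughly as below (made-up values, and assuming TP_printk_btrfs prefixes
+ * the filesystem UUID):
+ *
+ *   btrfs_tree_read_unlock: <fsid>: block=30408704 generation=7 owner=5 is_log_tree=0
+ */
+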
+#define DEFINE_BTRFS_LOCK_EVENT(name)                          \
+DEFINE_EVENT(btrfs_locking_events, name,                       \
+               TP_PROTO(const struct extent_buffer *eb),       \
+                                                               \
+               TP_ARGS(eb)                                     \
+)
+
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_unlock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock_blocking);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_read);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_write);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_clear_lock_blocking_read);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_clear_lock_blocking_write);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_write_lock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);
+
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */
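
All ten events share the single btrfs_locking_events class above, so each DEFINE_BTRFS_LOCK_EVENT() line reuses the same entry layout, assignment and print format, and covering another locking path would take two lines. A hypothetical sketch of that, purely for illustration (neither the event nor the call site below exists in this patch):

/* include/trace/events/btrfs.h: one line per new event, the class does the rest. */
DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_lock_example);

/* fs/btrfs/locking.c: the generated trace_<name>() hook takes the extent buffer. */
void btrfs_tree_lock_example(struct extent_buffer *eb)
{
	trace_btrfs_tree_lock_example(eb);
	/* ... the actual locking work would follow here ... */
}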