return container_of(node, struct lock_stat, rb);
 }
 
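+/*
+ * Look up an existing lock_stat by address.  Unlike lock_stat_findnew()
+ * this never allocates a new entry and returns NULL when none is found.
+ */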
+static struct lock_stat *lock_stat_find(u64 addr)
+{
+       struct hlist_head *entry = lockhashentry(addr);
+       struct lock_stat *ret;
+
+       hlist_for_each_entry(ret, entry, hash_entry) {
+               if (ret->addr == addr)
+                       return ret;
+       }
+       return NULL;
+}
+
 static struct lock_stat *lock_stat_findnew(u64 addr, const char *name)
 {
        struct hlist_head *entry = lockhashentry(addr);
        return 0;
 }
 
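+/*
+ * Handle a sample from the contention begin tracepoint: mark the lock
+ * (or the thread, with per-thread stats) as contended and record the
+ * timestamp so the matching end event can compute the wait time.
+ */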
+static int report_lock_contention_begin_event(struct evsel *evsel,
+                                             struct perf_sample *sample)
+{
+       struct lock_stat *ls;
+       struct thread_stat *ts;
+       struct lock_seq_stat *seq;
+       u64 addr = evsel__intval(evsel, sample, "lock_addr");
+
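+       /* with per-thread stats, aggregate by tid instead of lock address */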
+       if (show_thread_stats)
+               addr = sample->tid;
+
+       ls = lock_stat_findnew(addr, "No name");
+       if (!ls)
+               return -ENOMEM;
+
+       ts = thread_stat_findnew(sample->tid);
+       if (!ts)
+               return -ENOMEM;
+
+       seq = get_seq(ts, addr);
+       if (!seq)
+               return -ENOMEM;
+
+       switch (seq->state) {
+       case SEQ_STATE_UNINITIALIZED:
+       case SEQ_STATE_ACQUIRED:
+               break;
+       case SEQ_STATE_CONTENDED:
+               /*
+                * A nested contention-begin can arrive while mutex
+                * spinning; in that case, keep the original
+                * contention-begin event and ignore the second one.
+                */
+               goto end;
+       case SEQ_STATE_ACQUIRING:
+       case SEQ_STATE_READ_ACQUIRED:
+       case SEQ_STATE_RELEASED:
+               /* broken lock sequence */
+               if (!ls->broken) {
+                       ls->broken = 1;
+                       bad_hist[BROKEN_CONTENDED]++;
+               }
+               list_del_init(&seq->list);
+               free(seq);
+               goto end;
+       default:
+               BUG_ON("Unknown state of lock sequence found!\n");
+               break;
+       }
+
+       if (seq->state != SEQ_STATE_CONTENDED) {
+               seq->state = SEQ_STATE_CONTENDED;
+               seq->prev_event_time = sample->time;
+               ls->nr_contended++;
+       }
+end:
+       return 0;
+}
+
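+/*
+ * Handle a sample from the contention end tracepoint: close the window
+ * opened by the begin event and fold the elapsed time into the lock's
+ * wait time statistics.
+ */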
+static int report_lock_contention_end_event(struct evsel *evsel,
+                                           struct perf_sample *sample)
+{
+       struct lock_stat *ls;
+       struct thread_stat *ts;
+       struct lock_seq_stat *seq;
+       u64 contended_term;
+       u64 addr = evsel__intval(evsel, sample, "lock_addr");
+
+       if (show_thread_stats)
+               addr = sample->tid;
+
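+       /*
+        * An end event without a matching begin (e.g. contention that
+        * started before recording) has nothing to account; skip it.
+        */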
+       ls = lock_stat_find(addr);
+       if (!ls)
+               return 0;
+
+       ts = thread_stat_find(sample->tid);
+       if (!ts)
+               return 0;
+
+       seq = get_seq(ts, addr);
+       if (!seq)
+               return -ENOMEM;
+
+       switch (seq->state) {
+       case SEQ_STATE_UNINITIALIZED:
+               goto end;
+       case SEQ_STATE_CONTENDED:
+               contended_term = sample->time - seq->prev_event_time;
+               ls->wait_time_total += contended_term;
+               if (contended_term < ls->wait_time_min)
+                       ls->wait_time_min = contended_term;
+               if (ls->wait_time_max < contended_term)
+                       ls->wait_time_max = contended_term;
+               break;
+       case SEQ_STATE_ACQUIRING:
+       case SEQ_STATE_ACQUIRED:
+       case SEQ_STATE_READ_ACQUIRED:
+       case SEQ_STATE_RELEASED:
+               /* broken lock sequence */
+               if (!ls->broken) {
+                       ls->broken = 1;
+                       bad_hist[BROKEN_ACQUIRED]++;
+               }
+               list_del_init(&seq->list);
+               free(seq);
+               goto end;
+       default:
+               BUG_ON("Unknown state of lock sequence found!\n");
+               break;
+       }
+
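+       /* acquired now; update counts and the running average wait time */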
+       seq->state = SEQ_STATE_ACQUIRED;
+       ls->nr_acquired++;
+       ls->avg_wait_time = ls->wait_time_total / ls->nr_acquired;
+end:
+       return 0;
+}
+
 /* lock oriented handlers */
 /* TODO: handlers for CPU oriented, thread oriented */
 static struct trace_lock_handler report_lock_ops  = {
        .acquired_event         = report_lock_acquired_event,
        .contended_event        = report_lock_contended_event,
        .release_event          = report_lock_release_event,
+       .contention_begin_event = report_lock_contention_begin_event,
+       .contention_end_event   = report_lock_contention_end_event,
 };
 
 static struct trace_lock_handler *trace_handler;
                goto out_delete;
        }
 
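+       /* also register handlers for the lock contention tracepoints */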
+       if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
+               pr_err("Initializing perf session tracepoint handlers failed\n");
+               goto out_delete;
+       }
+
        if (setup_output_field(output_fields))
                goto out_delete;