*             Obtain the 64bit jiffies
  *     Return
  *             The 64 bit jiffies
+ *
+ * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
+ *     Description
+ *             For an eBPF program attached to a perf event, retrieve the
+ *             branch records (struct perf_branch_entry) associated with
+ *             *ctx* and store them in the buffer pointed to by *buf*,
+ *             up to *size* bytes.
+ *     Return
+ *             On success, number of bytes written to *buf*. On error, a
+ *             negative value.
+ *
+ *             The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
+ *             instead return the number of bytes required to store all the
+ *             branch entries. If this flag is set, *buf* may be NULL.
+ *
+ *             **-EINVAL** if the arguments are invalid or if **size** is
+ *             not a multiple of sizeof(struct perf_branch_entry).
+ *
+ *             **-ENOENT** if architecture does not support branch records.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
        FN(probe_read_kernel_str),      \
        FN(tcp_send_ack),               \
        FN(send_signal_thread),         \
-       FN(jiffies64),
+       FN(jiffies64),                  \
+       FN(read_branch_records),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
 /* BPF_FUNC_sk_storage_get flags */
 #define BPF_SK_STORAGE_GET_F_CREATE    (1ULL << 0)
 
+/* BPF_FUNC_read_branch_records flags. */
+#define BPF_F_GET_BRANCH_RECORDS_SIZE  (1ULL << 0)
+
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
        BPF_ADJ_ROOM_NET,
 
          .arg3_type      = ARG_CONST_SIZE,
 };
 
+/*
+ * bpf_read_branch_records - BPF helper exposing perf branch records.
+ * @ctx:   perf-event sample context the program is attached to
+ * @buf:   destination buffer; may be NULL in size-query mode
+ * @size:  capacity of @buf in bytes; must be a whole multiple of
+ *         sizeof(struct perf_branch_entry)
+ * @flags: 0, or BPF_F_GET_BRANCH_RECORDS_SIZE to query the required size
+ *
+ * Return: number of bytes copied into @buf, or — with the size-query flag —
+ * the number of bytes a full copy would need; -EINVAL on bad arguments or
+ * when no branch stack was sampled; -ENOENT when branch records are not
+ * supported (helper body compiled out below).
+ */
+BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
+          void *, buf, u32, size, u64, flags)
+{
+#ifndef CONFIG_X86
+       /* Compiled out on non-x86: report "not supported" to the caller. */
+       return -ENOENT;
+#else
+       static const u32 br_entry_size = sizeof(struct perf_branch_entry);
+       struct perf_branch_stack *br_stack = ctx->data->br_stack;
+       u32 to_copy;
+
+       /* Reject any flag bits this helper does not define. */
+       if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
+               return -EINVAL;
+
+       /* No branch stack was captured with this sample. */
+       if (unlikely(!br_stack))
+               return -EINVAL;
+
+       /*
+        * Size-query mode returns before the buf/size validation below,
+        * so @buf may be NULL and @size is ignored on this path.
+        */
+       if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
+               return br_stack->nr * br_entry_size;
+
+       if (!buf || (size % br_entry_size != 0))
+               return -EINVAL;
+
+       /* Copy only as many whole entries as fit in the caller's buffer. */
+       to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
+       memcpy(buf, br_stack->entries, to_copy);
+
+       return to_copy;
+#endif
+}
+
+/*
+ * Verifier contract for bpf_read_branch_records(): arg2 is a writable
+ * memory region that may be NULL (size-query mode) and arg3 its size,
+ * with zero allowed to pair with the NULL buffer. GPL-only, returns a
+ * plain integer.
+ */
+static const struct bpf_func_proto bpf_read_branch_records_proto = {
+       .func           = bpf_read_branch_records,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
+       .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
+       .arg4_type      = ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *
 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
                return &bpf_get_stack_proto_tp;
        case BPF_FUNC_perf_prog_read_value:
                return &bpf_perf_prog_read_value_proto;
+       case BPF_FUNC_read_branch_records:
+               return &bpf_read_branch_records_proto;
        default:
                return tracing_func_proto(func_id, prog);
        }