 /* Clean up bpf_link and corresponding anon_inode file and FD. After
  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
- * anon_inode's release() call. This helper marksbpf_link as
+ * anon_inode's release() call. This helper marks bpf_link as
  * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
  * is not decremented, it's the responsibility of a calling code that failed
  * to complete bpf_link initialization.
+ * This helper eventually calls link's dealloc callback, but does not call
+ * link's release callback.
  */
 void bpf_link_cleanup(struct bpf_link_primer *primer)
 {
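
Not part of the patch: the comment above describes the contract the new uprobe multi
attach code relies on in its error path. Once a link has been primed, a failed attach
must go through bpf_link_cleanup() (or bpf_link_settle() on success) instead of freeing
the link directly. A minimal sketch of that pattern; my_link, my_link_lops and
my_register() are made-up names used only for illustration:

struct my_link {
        struct bpf_link link;
};

static int my_link_attach(struct bpf_prog *prog)
{
        struct bpf_link_primer primer;
        struct my_link *link;
        int err;

        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;

        /* my_link_lops is assumed to provide ->release and ->dealloc */
        bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC, &my_link_lops, prog);

        err = bpf_link_prime(&link->link, &primer);
        if (err) {
                /* not primed yet, no anon_inode exists: plain kfree() is fine */
                kfree(link);
                return err;
        }

        err = my_register(link);        /* hypothetical attach step */
        if (err) {
                /* primed: must use bpf_link_cleanup(), which eventually calls
                 * ->dealloc but not ->release; prog's refcnt stays with the caller
                 */
                bpf_link_cleanup(&primer);
                return err;
        }

        return bpf_link_settle(&primer);        /* returns the new link FD */
}
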
                if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
                    attach_type != BPF_TRACE_KPROBE_MULTI)
                        return -EINVAL;
+               if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
+                   attach_type != BPF_TRACE_UPROBE_MULTI)
+                       return -EINVAL;
                if (attach_type != BPF_PERF_EVENT &&
-                   attach_type != BPF_TRACE_KPROBE_MULTI)
+                   attach_type != BPF_TRACE_KPROBE_MULTI &&
+                   attach_type != BPF_TRACE_UPROBE_MULTI)
                        return -EINVAL;
                return 0;
        case BPF_PROG_TYPE_SCHED_CLS:

        case BPF_PROG_TYPE_KPROBE:
                if (attr->link_create.attach_type == BPF_PERF_EVENT)
                        ret = bpf_perf_link_attach(attr, prog);
-               else
+               else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI)
                        ret = bpf_kprobe_multi_link_attach(attr, prog);
+               else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
+                       ret = bpf_uprobe_multi_link_attach(attr, prog);
                break;
        default:
                ret = -EINVAL;
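
For reference, not part of the patch: the checks in bpf_prog_attach_check_attach_type()
and the dispatch above key off the same pair of values, so a program must be loaded with
expected_attach_type = BPF_TRACE_UPROBE_MULTI and then attached with attach_type =
BPF_TRACE_UPROBE_MULTI. A rough load-side sketch over raw bpf(2); the trivial "return 0"
program body is a placeholder, and the matching attach side is sketched at the end of
this excerpt:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int load_uprobe_multi_prog(void)
{
        /* placeholder program: r0 = 0; exit */
        struct bpf_insn insns[] = {
                { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
                { .code = BPF_JMP | BPF_EXIT },
        };
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_KPROBE;
        attr.expected_attach_type = BPF_TRACE_UPROBE_MULTI;
        attr.insns = (__u64)(unsigned long)insns;
        attr.insn_cnt = sizeof(insns) / sizeof(insns[0]);
        attr.license = (__u64)(unsigned long)"GPL";

        return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}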
 
 #include <linux/sort.h>
 #include <linux/key.h>
 #include <linux/verification.h>
+#include <linux/namei.h>
 
 #include <net/bpf_sk_storage.h>
 
        return 0;
 }
 #endif
+
+#ifdef CONFIG_UPROBES
+struct bpf_uprobe_multi_link;
+
+struct bpf_uprobe {
+       struct bpf_uprobe_multi_link *link;
+       loff_t offset;
+       struct uprobe_consumer consumer;
+};
+
+struct bpf_uprobe_multi_link {
+       struct path path;
+       struct bpf_link link;
+       u32 cnt;
+       struct bpf_uprobe *uprobes;
+};
+
+struct bpf_uprobe_multi_run_ctx {
+       struct bpf_run_ctx run_ctx;
+       unsigned long entry_ip;
+};
+
+static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
+                                 u32 cnt)
+{
+       u32 i;
+
+       for (i = 0; i < cnt; i++) {
+               uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
+                                 &uprobes[i].consumer);
+       }
+}
+
+static void bpf_uprobe_multi_link_release(struct bpf_link *link)
+{
+       struct bpf_uprobe_multi_link *umulti_link;
+
+       umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+       bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
+}
+
+static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
+{
+       struct bpf_uprobe_multi_link *umulti_link;
+
+       umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+       path_put(&umulti_link->path);
+       kvfree(umulti_link->uprobes);
+       kfree(umulti_link);
+}
+
+static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
+       .release = bpf_uprobe_multi_link_release,
+       .dealloc = bpf_uprobe_multi_link_dealloc,
+};
+
+static int uprobe_prog_run(struct bpf_uprobe *uprobe,
+                          unsigned long entry_ip,
+                          struct pt_regs *regs)
+{
+       struct bpf_uprobe_multi_link *link = uprobe->link;
+       struct bpf_uprobe_multi_run_ctx run_ctx = {
+               .entry_ip = entry_ip,
+       };
+       struct bpf_prog *prog = link->link.prog;
+       bool sleepable = prog->aux->sleepable;
+       struct bpf_run_ctx *old_run_ctx;
+       int err = 0;
+
+       if (sleepable)
+               rcu_read_lock_trace();
+       else
+               rcu_read_lock();
+
+       migrate_disable();
+
+       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+       err = bpf_prog_run(link->link.prog, regs);
+       bpf_reset_run_ctx(old_run_ctx);
+
+       migrate_enable();
+
+       if (sleepable)
+               rcu_read_unlock_trace();
+       else
+               rcu_read_unlock();
+       return err;
+}
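
Not part of the patch: while the program runs, current->bpf_ctx points at the
run_ctx.run_ctx installed above, which is what lets BPF helpers recover the probed
address. A hypothetical lookup sketch (the function name is made up):

static unsigned long bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
        struct bpf_uprobe_multi_run_ctx *run_ctx;

        /* ctx is expected to be current->bpf_ctx, as set by uprobe_prog_run() */
        run_ctx = container_of(ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
        return run_ctx->entry_ip;
}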
+
+static int
+uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
+{
+       struct bpf_uprobe *uprobe;
+
+       uprobe = container_of(con, struct bpf_uprobe, consumer);
+       return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
+}
+
+static int
+uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
+{
+       struct bpf_uprobe *uprobe;
+
+       uprobe = container_of(con, struct bpf_uprobe, consumer);
+       return uprobe_prog_run(uprobe, func, regs);
+}
+
+int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+       struct bpf_uprobe_multi_link *link = NULL;
+       unsigned long __user *uref_ctr_offsets;
+       unsigned long *ref_ctr_offsets = NULL;
+       struct bpf_link_primer link_primer;
+       struct bpf_uprobe *uprobes = NULL;
+       unsigned long __user *uoffsets;
+       void __user *upath;
+       u32 flags, cnt, i;
+       struct path path;
+       char *name;
+       int err;
+
+       /* no support for 32bit archs yet */
+       if (sizeof(u64) != sizeof(void *))
+               return -EOPNOTSUPP;
+
+       if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
+               return -EINVAL;
+
+       flags = attr->link_create.uprobe_multi.flags;
+       if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
+               return -EINVAL;
+
+       /*
+        * path, offsets and cnt are mandatory,
+        * ref_ctr_offsets is optional
+        */
+       upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
+       uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
+       cnt = attr->link_create.uprobe_multi.cnt;
+
+       if (!upath || !uoffsets || !cnt)
+               return -EINVAL;
+
+       uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
+
+       name = strndup_user(upath, PATH_MAX);
+       if (IS_ERR(name)) {
+               err = PTR_ERR(name);
+               return err;
+       }
+
+       err = kern_path(name, LOOKUP_FOLLOW, &path);
+       kfree(name);
+       if (err)
+               return err;
+
+       if (!d_is_reg(path.dentry)) {
+               err = -EBADF;
+               goto error_path_put;
+       }
+
+       err = -ENOMEM;
+
+       link = kzalloc(sizeof(*link), GFP_KERNEL);
+       uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
+
+       if (!uprobes || !link)
+               goto error_free;
+
+       if (uref_ctr_offsets) {
+               ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
+               if (!ref_ctr_offsets)
+                       goto error_free;
+       }
+
+       for (i = 0; i < cnt; i++) {
+               if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) {
+                       err = -EFAULT;
+                       goto error_free;
+               }
+               if (__get_user(uprobes[i].offset, uoffsets + i)) {
+                       err = -EFAULT;
+                       goto error_free;
+               }
+
+               uprobes[i].link = link;
+
+               if (flags & BPF_F_UPROBE_MULTI_RETURN)
+                       uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
+               else
+                       uprobes[i].consumer.handler = uprobe_multi_link_handler;
+       }
+
+       link->cnt = cnt;
+       link->uprobes = uprobes;
+       link->path = path;
+
+       bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
+                     &bpf_uprobe_multi_link_lops, prog);
+
+       for (i = 0; i < cnt; i++) {
+               err = uprobe_register_refctr(d_real_inode(link->path.dentry),
+                                            uprobes[i].offset,
+                                            ref_ctr_offsets ? ref_ctr_offsets[i] : 0,
+                                            &uprobes[i].consumer);
+               if (err) {
+                       bpf_uprobe_unregister(&path, uprobes, i);
+                       goto error_free;
+               }
+       }
+
+       err = bpf_link_prime(&link->link, &link_primer);
+       if (err)
+               goto error_free;
+
+       kvfree(ref_ctr_offsets);
+       return bpf_link_settle(&link_primer);
+
+error_free:
+       kvfree(ref_ctr_offsets);
+       kvfree(uprobes);
+       kfree(link);
+error_path_put:
+       path_put(&path);
+       return err;
+}
+#else /* !CONFIG_UPROBES */
+int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+       return -EOPNOTSUPP;
+}
+#endif /* CONFIG_UPROBES */
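
Usage note, not part of the patch: from user space the link is created with
BPF_LINK_CREATE, filling exactly the link_create.uprobe_multi fields consumed above.
A rough raw-bpf(2) sketch; prog_fd, the binary path and the offset are placeholders,
and the optional ref_ctr_offsets is left unset:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int attach_uprobe_multi(int prog_fd)
{
        /* hypothetical target: one probe at a fixed file offset in the binary */
        unsigned long offsets[] = { 0x1234 };
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.link_create.prog_fd = prog_fd;     /* loaded with BPF_TRACE_UPROBE_MULTI */
        attr.link_create.attach_type = BPF_TRACE_UPROBE_MULTI;
        attr.link_create.uprobe_multi.path = (__u64)(unsigned long)"/usr/bin/example";
        attr.link_create.uprobe_multi.offsets = (__u64)(unsigned long)offsets;
        attr.link_create.uprobe_multi.cnt = 1;
        /* set uprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN to attach return probes */
        return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}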