.max_entries = 1,
 };
 
+struct bpf_map_def SEC("maps") pidmap = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(__u32),
+       .value_size = sizeof(__u32),
+       .max_entries = 1,
+};
+
 SEC("tracepoint/syscalls/sys_enter_nanosleep")
 int trace(void *ctx)
 {
-       __u32 key = 0;
+       __u32 pid = bpf_get_current_pid_tgid();
+       __u32 key = 0, *expected_pid;
        __u64 *val;
 
+       expected_pid = bpf_map_lookup_elem(&pidmap, &key);
+       if (!expected_pid || *expected_pid != pid)
+               return 0;
+
        val = bpf_map_lookup_elem(&cg_ids, &key);
        if (val)
                *val = bpf_get_current_cgroup_id();
 
        const char *probe_name = "syscalls/sys_enter_nanosleep";
        const char *file = "get_cgroup_id_kern.o";
        int err, bytes, efd, prog_fd, pmu_fd;
+       int cgroup_fd, cgidmap_fd, pidmap_fd;
        struct perf_event_attr attr = {};
-       int cgroup_fd, cgidmap_fd;
        struct bpf_object *obj;
        __u64 kcgid = 0, ucgid;
+       __u32 key = 0, pid;
        int exit_code = 1;
        char buf[256];
-       __u32 key = 0;
 
 	cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");
 	if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
 		  cgidmap_fd, errno))
                goto close_prog;
 
+       pidmap_fd = bpf_find_map(__func__, obj, "pidmap");
+       if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
+                 pidmap_fd, errno))
+               goto close_prog;
+
+       pid = getpid();
+       bpf_map_update_elem(pidmap_fd, &key, &pid, 0);
+
        snprintf(buf, sizeof(buf),
                 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
        efd = open(buf, O_RDONLY, 0);