DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
 };
 
+void optee_cq_init(struct optee_call_queue *cq, int thread_count)
+{
+       mutex_init(&cq->mutex);
+       INIT_LIST_HEAD(&cq->waiters);
+
+       /*
+        * If cq->total_thread_count is 0 then we're not trying to keep
+        * track of how many free threads we have; instead we rely on
+        * the secure world to tell us when we're out of threads and have to
+        * wait for another thread to become available.
+        */
+       cq->total_thread_count = thread_count;
+       cq->free_thread_count = thread_count;
+}
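
When thread_count is 0 the driver cannot throttle callers locally: it only
learns about thread exhaustion from the SMC return code. A simplified sketch
of that path, modeled on the existing optee_smc_do_call_with_arg() retry loop
(not the verbatim driver code; RPC handling and argument setup are omitted):

	optee_cq_wait_init(&optee->call_queue, &w, sys_thread);
	while (true) {
		invoke_fn(param.a0, param.a1, param.a2, param.a3,
			  param.a4, param.a5, param.a6, param.a7, &res);
		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/* Out of threads in secure world: sleep, then retry */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else {
			break;	/* Call entered (or failed for another reason) */
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
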
+
 void optee_cq_wait_init(struct optee_call_queue *cq,
                        struct optee_call_waiter *w, bool sys_thread)
 {
+       unsigned int free_thread_threshold;
+       bool need_wait = false;
+
+       memset(w, 0, sizeof(*w));
+
        /*
         * We're preparing to make a call to secure world. In case we can't
         * allocate a thread in secure world we'll end up waiting in
         * optee_cq_wait_for_completion().
         */
        init_completion(&w->c);
        list_add_tail(&w->list_node, &cq->waiters);
+       w->sys_thread = sys_thread;
+
+       if (cq->total_thread_count) {
+               if (sys_thread || !cq->sys_thread_req_count)
+                       free_thread_threshold = 0;
+               else
+                       free_thread_threshold = 1;
+
+               if (cq->free_thread_count > free_thread_threshold)
+                       cq->free_thread_count--;
+               else
+                       need_wait = true;
+       }
 
        mutex_unlock(&cq->mutex);
+
+       while (need_wait) {
+               optee_cq_wait_for_completion(cq, w);
+               mutex_lock(&cq->mutex);
+
+               if (sys_thread || !cq->sys_thread_req_count)
+                       free_thread_threshold = 0;
+               else
+                       free_thread_threshold = 1;
+
+               if (cq->free_thread_count > free_thread_threshold) {
+                       cq->free_thread_count--;
+                       need_wait = false;
+               }
+
+               mutex_unlock(&cq->mutex);
+       }
 }
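
The threshold above is what reserves a thread for system sessions: while at
least one system session is registered (sys_thread_req_count > 0), a normal
caller may only take a thread when more than one is free, whereas a system
caller may take the last one. A minimal stand-alone model of that rule
(user-space C, hypothetical helper name, locking omitted):

	#include <assert.h>
	#include <stdbool.h>

	/* Model of the reservation rule used in optee_cq_wait_init() */
	static bool can_take_thread(int free_count, int sys_req_count,
				    bool sys_caller)
	{
		/* Normal callers keep one thread in reserve while any
		 * system session is registered. */
		int threshold = (sys_caller || !sys_req_count) ? 0 : 1;

		return free_count > threshold;
	}

	int main(void)
	{
		/* Two TEE threads, one registered system session */
		assert(can_take_thread(2, 1, false));	/* normal: 2 > 1 */
		assert(!can_take_thread(1, 1, false));	/* normal must wait */
		assert(can_take_thread(1, 1, true));	/* system takes last */
		assert(can_take_thread(1, 0, false));	/* no reservation */
		return 0;
	}
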
 
 void optee_cq_wait_for_completion(struct optee_call_queue *cq,
 {
        struct optee_call_waiter *w;
 
+       /* Wake a waiting system session if any, prior to a normal session */
+       list_for_each_entry(w, &cq->waiters, list_node) {
+               if (w->sys_thread && !completion_done(&w->c)) {
+                       complete(&w->c);
+                       return;
+               }
+       }
+
        list_for_each_entry(w, &cq->waiters, list_node) {
                if (!completion_done(&w->c)) {
                        complete(&w->c);
        /* Get out of the list */
        list_del(&w->list_node);
 
+       cq->free_thread_count++;
+
        /* Wake up one eventual waiting task */
        optee_cq_complete_one(cq);
 
        mutex_unlock(&cq->mutex);
 }
 
+/* Count registered system sessions to reserve a system thread or not */
+static bool optee_cq_incr_sys_thread_count(struct optee_call_queue *cq)
+{
+       if (cq->total_thread_count <= 1)
+               return false;
+
+       mutex_lock(&cq->mutex);
+       cq->sys_thread_req_count++;
+       mutex_unlock(&cq->mutex);
+
+       return true;
+}
+
+static void optee_cq_decr_sys_thread_count(struct optee_call_queue *cq)
+{
+       mutex_lock(&cq->mutex);
+       cq->sys_thread_req_count--;
+       /* If there's someone waiting, let it resume */
+       optee_cq_complete_one(cq);
+       mutex_unlock(&cq->mutex);
+}
+
 /* Requires the filpstate mutex to be held */
 static struct optee_session *find_session(struct optee_context_data *ctxdata,
                                          u32 session_id)
        return rc;
 }
 
+int optee_system_session(struct tee_context *ctx, u32 session)
+{
+       struct optee *optee = tee_get_drvdata(ctx->teedev);
+       struct optee_context_data *ctxdata = ctx->data;
+       struct optee_session *sess;
+       int rc = -EINVAL;
+
+       mutex_lock(&ctxdata->mutex);
+
+       sess = find_session(ctxdata, session);
+       if (sess && (sess->use_sys_thread ||
+                    optee_cq_incr_sys_thread_count(&optee->call_queue))) {
+               sess->use_sys_thread = true;
+               rc = 0;
+       }
+
+       mutex_unlock(&ctxdata->mutex);
+
+       return rc;
+}
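
A hypothetical in-kernel client would open its session first and then flag it
as a system session. This sketch assumes the companion tee core change that
exposes the new system_session hook through tee_client_system_session(); dev
and sess_arg (a populated struct tee_ioctl_open_session_arg) are placeholders:

	rc = tee_client_open_session(ctx, &sess_arg, NULL);
	if (rc < 0 || sess_arg.ret)
		return -ENODEV;

	/* Best effort: a failure only means no thread is reserved */
	if (tee_client_system_session(ctx, sess_arg.session))
		dev_warn(dev, "TEE system session not available\n");
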
+
 int optee_close_session_helper(struct tee_context *ctx, u32 session,
                               bool system_thread)
 {
 
        optee_free_msg_arg(ctx, entry, offs);
 
+       if (system_thread)
+               optee_cq_decr_sys_thread_count(&optee->call_queue);
+
        return 0;
 }
 
 
                                unsigned long, unsigned long,
                                struct arm_smccc_res *);
 
+/*
+ * struct optee_call_waiter - TEE entry may need to wait for a free TEE thread
+ * @list_node          Reference in waiters list
+ * @c                  Waiting completion reference
+ * @sys_thread         True if waiter belongs to a system thread
+ */
 struct optee_call_waiter {
        struct list_head list_node;
        struct completion c;
+       bool sys_thread;
 };
 
+/*
+ * struct optee_call_queue - OP-TEE call queue management
+ * @mutex                      Serializes access to this struct
+ * @waiters                    List of threads waiting to enter OP-TEE
+ * @total_thread_count         Overall number of thread contexts in OP-TEE, or 0
+ * @free_thread_count          Number of free thread contexts in OP-TEE
+ * @sys_thread_req_count       Number of registered system thread sessions
+ */
 struct optee_call_queue {
        /* Serializes access to this struct */
        struct mutex mutex;
        struct list_head waiters;
+       int total_thread_count;
+       int free_thread_count;
+       int sys_thread_req_count;
 };
 
 struct optee_notif {
 int optee_open_session(struct tee_context *ctx,
                       struct tee_ioctl_open_session_arg *arg,
                       struct tee_param *param);
+int optee_system_session(struct tee_context *ctx, u32 session);
 int optee_close_session_helper(struct tee_context *ctx, u32 session,
                               bool system_thread);
 int optee_close_session(struct tee_context *ctx, u32 session);
        mp->u.value.c = p->u.value.c;
 }
 
+void optee_cq_init(struct optee_call_queue *cq, int thread_count);
 void optee_cq_wait_init(struct optee_call_queue *cq,
                        struct optee_call_waiter *w, bool sys_thread);
 void optee_cq_wait_for_completion(struct optee_call_queue *cq,
 
        .release = optee_release,
        .open_session = optee_open_session,
        .close_session = optee_close_session,
+       .system_session = optee_system_session,
        .invoke_func = optee_invoke_func,
        .cancel_req = optee_cancel_req,
        .shm_register = optee_shm_register,
        return true;
 }
 
+static unsigned int optee_msg_get_thread_count(optee_invoke_fn *invoke_fn)
+{
+       struct arm_smccc_res res;
+
+       invoke_fn(OPTEE_SMC_GET_THREAD_COUNT, 0, 0, 0, 0, 0, 0, 0, &res);
+       if (res.a0)
+               return 0;
+       return res.a1;
+}
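
If the running OP-TEE does not implement OPTEE_SMC_GET_THREAD_COUNT, the fast
call is expected to fail with a nonzero res.a0 (e.g.
OPTEE_SMC_RETURN_UNKNOWN_FUNCTION), so this helper returns 0 and
optee_cq_init() falls back to the untracked mode described earlier, where
thread exhaustion is reported by secure world at call time.
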
+
 static struct tee_shm_pool *
 optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
 {
        struct optee *optee = NULL;
        void *memremaped_shm = NULL;
        unsigned int rpc_param_count;
+       unsigned int thread_count;
        struct tee_device *teedev;
        struct tee_context *ctx;
        u32 max_notif_value;
                return -EINVAL;
        }
 
+       thread_count = optee_msg_get_thread_count(invoke_fn);
        if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
                                             &max_notif_value,
                                             &rpc_param_count)) {
        if (rc)
                goto err_unreg_supp_teedev;
 
-       mutex_init(&optee->call_queue.mutex);
-       INIT_LIST_HEAD(&optee->call_queue.waiters);
+       optee_cq_init(&optee->call_queue, thread_count);
        optee_supp_init(&optee->supp);
        optee->smc.memremaped_shm = memremaped_shm;
        optee->pool = pool;