crypto:hisilicon/sec2 - update busy processing logic
authorKai Ye <yekai13@huawei.com>
Tue, 7 Jul 2020 01:15:38 +0000 (09:15 +0800)
committerHerbert Xu <herbert@gondor.apana.org.au>
Thu, 16 Jul 2020 11:49:00 +0000 (21:49 +1000)
Previously, if a SEC queue was in the 'fake busy' state,
a request carrying a 'fake busy' flag was still sent to the
hardware and the sending function returned busy. After the request
finished, the SEC driver's callback identified the 'fake busy' flag
and notified the user that the hardware was no longer busy by
calling the user's callback function.

Now, a request sent to busy hardware is cached in the
SEC queue's backlog and '-EBUSY' is returned to the user.
After a request finishes, the cached requests are
processed in the callback function to notify the
corresponding users that the SEC queue can accept more requests.

Signed-off-by: Kai Ye <yekai13@huawei.com>
Reviewed-by: Longfang Liu <liulongfang@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/hisilicon/sec2/sec.h
drivers/crypto/hisilicon/sec2/sec_crypto.c
drivers/crypto/hisilicon/sec2/sec_main.c

index 7b64aca704d642208aefa1fd4c22913781a0cdee..037762b531e2706bcff0f8994f9217c4f3cc0eca 100644 (file)
@@ -46,9 +46,11 @@ struct sec_req {
 
        struct sec_cipher_req c_req;
        struct sec_aead_req aead_req;
+       struct list_head backlog_head;
 
        int err_type;
        int req_id;
+       int flag;
 
        /* Status of the SEC request */
        bool fake_busy;
@@ -104,6 +106,7 @@ struct sec_qp_ctx {
        struct sec_alg_res res[QM_Q_DEPTH];
        struct sec_ctx *ctx;
        struct mutex req_lock;
+       struct list_head backlog;
        struct hisi_acc_sgl_pool *c_in_pool;
        struct hisi_acc_sgl_pool *c_out_pool;
        atomic_t pending_reqs;
@@ -161,6 +164,7 @@ struct sec_dfx {
        atomic64_t send_cnt;
        atomic64_t recv_cnt;
        atomic64_t send_busy_cnt;
+       atomic64_t recv_busy_cnt;
        atomic64_t err_bd_cnt;
        atomic64_t invalid_req_cnt;
        atomic64_t done_flag_cnt;
index 64614a9bdf219fa0978fe4f07d00e8c3a8e0293d..bfb9ce1359f3d1048898dc46ae545866221c6c62 100644 (file)
@@ -166,6 +166,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
        req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
        if (unlikely(!req)) {
                atomic64_inc(&dfx->invalid_req_cnt);
+               atomic_inc(&qp->qp_status.used);
                return;
        }
        req->err_type = bd->type2.error_type;
@@ -198,21 +199,30 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
        struct sec_qp_ctx *qp_ctx = req->qp_ctx;
        int ret;
 
+       if (ctx->fake_req_limit <=
+           atomic_read(&qp_ctx->qp->qp_status.used) &&
+           !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
+               return -EBUSY;
+
        mutex_lock(&qp_ctx->req_lock);
        ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+
+       if (ctx->fake_req_limit <=
+           atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
+               list_add_tail(&req->backlog_head, &qp_ctx->backlog);
+               atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
+               atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
+               mutex_unlock(&qp_ctx->req_lock);
+               return -EBUSY;
+       }
        mutex_unlock(&qp_ctx->req_lock);
-       atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
 
        if (unlikely(ret == -EBUSY))
                return -ENOBUFS;
 
-       if (!ret) {
-               if (req->fake_busy) {
-                       atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
-                       ret = -EBUSY;
-               } else {
-                       ret = -EINPROGRESS;
-               }
+       if (likely(!ret)) {
+               ret = -EINPROGRESS;
+               atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
        }
 
        return ret;
@@ -373,8 +383,8 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
        qp_ctx->ctx = ctx;
 
        mutex_init(&qp_ctx->req_lock);
-       atomic_set(&qp_ctx->pending_reqs, 0);
        idr_init(&qp_ctx->req_idr);
+       INIT_LIST_HEAD(&qp_ctx->backlog);
 
        qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
                                                     SEC_SGL_SGE_NR);
@@ -1048,21 +1058,49 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
                dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
 }
 
+static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
+                               struct sec_qp_ctx *qp_ctx)
+{
+       struct sec_req *backlog_req = NULL;
+
+       mutex_lock(&qp_ctx->req_lock);
+       if (ctx->fake_req_limit >=
+           atomic_read(&qp_ctx->qp->qp_status.used) &&
+           !list_empty(&qp_ctx->backlog)) {
+               backlog_req = list_first_entry(&qp_ctx->backlog,
+                               typeof(*backlog_req), backlog_head);
+               list_del(&backlog_req->backlog_head);
+       }
+       mutex_unlock(&qp_ctx->req_lock);
+
+       return backlog_req;
+}
+
 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
                                  int err)
 {
        struct skcipher_request *sk_req = req->c_req.sk_req;
        struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+       struct skcipher_request *backlog_sk_req;
+       struct sec_req *backlog_req;
 
-       atomic_dec(&qp_ctx->pending_reqs);
        sec_free_req_id(req);
 
        /* IV output at encrypto of CBC mode */
        if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
                sec_update_iv(req, SEC_SKCIPHER);
 
-       if (req->fake_busy)
-               sk_req->base.complete(&sk_req->base, -EINPROGRESS);
+       while (1) {
+               backlog_req = sec_back_req_clear(ctx, qp_ctx);
+               if (!backlog_req)
+                       break;
+
+               backlog_sk_req = backlog_req->c_req.sk_req;
+               backlog_sk_req->base.complete(&backlog_sk_req->base,
+                                               -EINPROGRESS);
+               atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
+       }
+
 
        sk_req->base.complete(&sk_req->base, err);
 }
@@ -1133,10 +1171,10 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
        struct sec_cipher_req *c_req = &req->c_req;
        size_t authsize = crypto_aead_authsize(tfm);
        struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+       struct aead_request *backlog_aead_req;
+       struct sec_req *backlog_req;
        size_t sz;
 
-       atomic_dec(&qp_ctx->pending_reqs);
-
        if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
                sec_update_iv(req, SEC_AEAD);
 
@@ -1157,17 +1195,22 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 
        sec_free_req_id(req);
 
-       if (req->fake_busy)
-               a_req->base.complete(&a_req->base, -EINPROGRESS);
+       while (1) {
+               backlog_req = sec_back_req_clear(c, qp_ctx);
+               if (!backlog_req)
+                       break;
+
+               backlog_aead_req = backlog_req->aead_req.aead_req;
+               backlog_aead_req->base.complete(&backlog_aead_req->base,
+                                               -EINPROGRESS);
+               atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
+       }
 
        a_req->base.complete(&a_req->base, err);
 }
 
 static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
 {
-       struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-
-       atomic_dec(&qp_ctx->pending_reqs);
        sec_free_req_id(req);
        sec_free_queue_id(ctx, req);
 }
@@ -1187,11 +1230,6 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
                return req->req_id;
        }
 
-       if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
-               req->fake_busy = true;
-       else
-               req->fake_busy = false;
-
        return 0;
 }
 
@@ -1213,7 +1251,8 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
                sec_update_iv(req, ctx->alg_type);
 
        ret = ctx->req_op->bd_send(ctx, req);
-       if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+       if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
+               (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
                dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
                goto err_send_req;
        }
@@ -1407,6 +1446,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
        if (!sk_req->cryptlen)
                return 0;
 
+       req->flag = sk_req->base.flags;
        req->c_req.sk_req = sk_req;
        req->c_req.encrypt = encrypt;
        req->ctx = ctx;
@@ -1530,6 +1570,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
        struct sec_ctx *ctx = crypto_aead_ctx(tfm);
        int ret;
 
+       req->flag = a_req->base.flags;
        req->aead_req.aead_req = a_req;
        req->c_req.encrypt = encrypt;
        req->ctx = ctx;
index d5f0589a1b0d2df010a1291f2274071fe07a6f34..109e7400f8980448ef4b4079f3b71f22d2541f7e 100644 (file)
@@ -122,6 +122,7 @@ static struct sec_dfx_item sec_dfx_labels[] = {
        {"send_cnt", offsetof(struct sec_dfx, send_cnt)},
        {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
        {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
+       {"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
        {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
        {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
        {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},