req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
if (unlikely(!req)) {
atomic64_inc(&dfx->invalid_req_cnt);
+ atomic_inc(&qp->qp_status.used);
return;
}
req->err_type = bd->type2.error_type;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int ret;
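+ /*
+ * Bail out with -EBUSY right away if the queue is already over the
+ * soft limit and the caller did not set CRYPTO_TFM_REQ_MAY_BACKLOG,
+ * i.e. the request must not be parked on the backlog list.
+ */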
+ if (ctx->fake_req_limit <=
+ atomic_read(&qp_ctx->qp->qp_status.used) &&
+ !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+
mutex_lock(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+
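+ /*
+ * The BD was accepted by the hardware queue, but the queue depth has
+ * now crossed the soft limit: park the request on the software backlog
+ * and report -EBUSY so the crypto API treats it as backlogged.
+ */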
+ if (ctx->fake_req_limit <=
+ atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
+ list_add_tail(&req->backlog_head, &qp_ctx->backlog);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
+ atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
+ mutex_unlock(&qp_ctx->req_lock);
+ return -EBUSY;
+ }
mutex_unlock(&qp_ctx->req_lock);
- atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
if (unlikely(ret == -EBUSY))
return -ENOBUFS;
- if (!ret) {
- if (req->fake_busy) {
- atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
- ret = -EBUSY;
- } else {
- ret = -EINPROGRESS;
- }
+ if (likely(!ret)) {
+ ret = -EINPROGRESS;
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
}
return ret;
qp_ctx->ctx = ctx;
mutex_init(&qp_ctx->req_lock);
- atomic_set(&qp_ctx->pending_reqs, 0);
idr_init(&qp_ctx->req_idr);
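+ /* list of requests backlogged while the queue is over the soft limit */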
+ INIT_LIST_HEAD(&qp_ctx->backlog);
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
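+/*
+ * Take the first request off the software backlog, but only once the
+ * hardware queue has drained back under the fake_req_limit threshold.
+ * Returns NULL if the backlog is empty or the queue is still too busy.
+ */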
+static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_req *backlog_req = NULL;
+
+ mutex_lock(&qp_ctx->req_lock);
+ if (ctx->fake_req_limit >=
+ atomic_read(&qp_ctx->qp->qp_status.used) &&
+ !list_empty(&qp_ctx->backlog)) {
+ backlog_req = list_first_entry(&qp_ctx->backlog,
+ typeof(*backlog_req), backlog_head);
+ list_del(&backlog_req->backlog_head);
+ }
+ mutex_unlock(&qp_ctx->req_lock);
+
+ return backlog_req;
+}
+
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
int err)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct skcipher_request *backlog_sk_req;
+ struct sec_req *backlog_req;
- atomic_dec(&qp_ctx->pending_reqs);
sec_free_req_id(req);
/* IV output at encryption of CBC mode */
if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
sec_update_iv(req, SEC_SKCIPHER);
- if (req->fake_busy)
- sk_req->base.complete(&sk_req->base, -EINPROGRESS);
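+ /*
+ * Drain the backlog: each parked request is completed with -EINPROGRESS
+ * so its submitter knows it has left the backlog and is being processed.
+ */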
+ while (1) {
+ backlog_req = sec_back_req_clear(ctx, qp_ctx);
+ if (!backlog_req)
+ break;
+
+ backlog_sk_req = backlog_req->c_req.sk_req;
+ backlog_sk_req->base.complete(&backlog_sk_req->base,
+ -EINPROGRESS);
+ atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
+ }
+
sk_req->base.complete(&sk_req->base, err);
}
struct sec_cipher_req *c_req = &req->c_req;
size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct aead_request *backlog_aead_req;
+ struct sec_req *backlog_req;
size_t sz;
- atomic_dec(&qp_ctx->pending_reqs);
-
if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
sec_update_iv(req, SEC_AEAD);
sec_free_req_id(req);
- if (req->fake_busy)
- a_req->base.complete(&a_req->base, -EINPROGRESS);
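+ /* drain the backlog, as in the skcipher callback above */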
+ while (1) {
+ backlog_req = sec_back_req_clear(c, qp_ctx);
+ if (!backlog_req)
+ break;
+
+ backlog_aead_req = backlog_req->aead_req.aead_req;
+ backlog_aead_req->base.complete(&backlog_aead_req->base,
+ -EINPROGRESS);
+ atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
+ }
a_req->base.complete(&a_req->base, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-
- atomic_dec(&qp_ctx->pending_reqs);
sec_free_req_id(req);
sec_free_queue_id(ctx, req);
}
return req->req_id;
}
- if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
- req->fake_busy = true;
- else
- req->fake_busy = false;
-
return 0;
}
sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
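+ /* -EBUSY is only an error when the request could not be backlogged */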
- if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
+ (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
goto err_send_req;
}
if (!sk_req->cryptlen)
return 0;
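+ /* keep the base flags so the send path can test CRYPTO_TFM_REQ_MAY_BACKLOG */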
+ req->flag = sk_req->base.flags;
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
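+ /* as above: keep the flags for the CRYPTO_TFM_REQ_MAY_BACKLOG check */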
+ req->flag = a_req->base.flags;
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;