From: John Fastabend
Date: Fri, 19 Jul 2019 17:29:16 +0000 (-0700)
Subject: net/tls: remove close callback sock unlock/lock around TX work flush
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=f87e62d45e51b12d48d2cb46b5cde8f83b866bc4;p=linux.git

net/tls: remove close callback sock unlock/lock around TX work flush

The tls close() callback currently drops the sock lock, makes a
cancel_delayed_work_sync() call, and then relocks the sock. By
restructuring the code we can avoid dropping the lock and then
reclaiming it. To simplify this we do the following,

  tls_sk_proto_close
  set_bit(CLOSING)
  set_bit(SCHEDULE)
  cancel_delayed_work_sync() <- cancel workqueue
  lock_sock(sk)
  ...
  release_sock(sk)
  strp_done()

Setting the CLOSING bit prevents the SCHEDULE bit from being cleared
by any workqueue item, e.g. one that happens to be scheduled and run
between when we set the SCHEDULE bit and cancel the work. Because the
SCHEDULE bit then stays set, no new work will be scheduled.

Tested with net selftests and bpf selftests.

Signed-off-by: John Fastabend
Signed-off-by: Jakub Kicinski
Reviewed-by: Dirk van der Merwe
Signed-off-by: Daniel Borkmann
---
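As a standalone illustration of the bit protocol described above (not
kernel code), the close side can be modeled in plain C11, with
atomic_fetch_or() standing in for the kernel's set_bit() and the
synchronous cancel reduced to a comment. The model_* names are
invented for the sketch; everything else mirrors the patch:

  /* Userspace model of the two-bit close protocol. A sketch of the
   * idea only, not the kernel implementation.
   */
  #include <stdatomic.h>
  #include <stdbool.h>

  #define BIT_TX_SCHEDULED 0
  #define BIT_TX_CLOSING   1

  static atomic_ulong tx_bitmask;

  /* Stand-in for the kernel's set_bit(): atomically OR the bit in. */
  static void model_set_bit(int nr, atomic_ulong *mask)
  {
          atomic_fetch_or(mask, 1UL << nr);
  }

  /* Close path: once CLOSING is set, a concurrently running worker
   * bails out before it can clear SCHEDULED, so SCHEDULED stays set
   * and no new work can be queued after the (elided) sync cancel.
   */
  static void model_cancel_work_tx(void)
  {
          model_set_bit(BIT_TX_CLOSING, &tx_bitmask);
          model_set_bit(BIT_TX_SCHEDULED, &tx_bitmask);
          /* cancel_delayed_work_sync(&ctx->tx_work.work); */
  }
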
diff --git a/include/net/tls.h b/include/net/tls.h
index 43f551cd508b3..d4276cb6de535 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -162,6 +162,7 @@ struct tls_sw_context_tx {
 	int async_capable;
 
 #define BIT_TX_SCHEDULED	0
+#define BIT_TX_CLOSING		1
 	unsigned long tx_bitmask;
 };
 
@@ -360,6 +361,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_sw_sendpage(struct sock *sk, struct page *page,
 		    int offset, size_t size, int flags);
 void tls_sw_close(struct sock *sk, long timeout);
+void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
 void tls_sw_free_resources_tx(struct sock *sk);
 void tls_sw_free_resources_rx(struct sock *sk);
 void tls_sw_release_resources_rx(struct sock *sk);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 7ab682ed99fac..5c29b410cf7d1 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -268,6 +268,9 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 	void (*sk_proto_close)(struct sock *sk, long timeout);
 	bool free_ctx = false;
 
+	if (ctx->tx_conf == TLS_SW)
+		tls_sw_cancel_work_tx(ctx);
+
 	lock_sock(sk);
 	sk_proto_close = ctx->sk_proto_close;
 
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index f58a8ffc2a9c5..38c0e53c727d4 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2054,6 +2054,15 @@ static void tls_data_ready(struct sock *sk)
 	}
 }
 
+void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
+{
+	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+
+	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
+	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
+	cancel_delayed_work_sync(&ctx->tx_work.work);
+}
+
 void tls_sw_free_resources_tx(struct sock *sk)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -2065,11 +2074,6 @@ void tls_sw_free_resources_tx(struct sock *sk)
 	if (atomic_read(&ctx->encrypt_pending))
 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
 
-	release_sock(sk);
-	cancel_delayed_work_sync(&ctx->tx_work.work);
-	lock_sock(sk);
-
-	/* Tx whatever records we can transmit and abandon the rest */
 	tls_tx_records(sk, -1);
 
 	/* Free up un-sent records in tx_list. First, free
@@ -2137,11 +2141,17 @@ static void tx_work_handler(struct work_struct *work)
 					       struct tx_work, work);
 	struct sock *sk = tx_work->sk;
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+	struct tls_sw_context_tx *ctx;
 
-	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+	if (unlikely(!tls_ctx))
 		return;
 
+	ctx = tls_sw_ctx_tx(tls_ctx);
+	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
+		return;
+
+	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+		return;
 	lock_sock(sk);
 	tls_tx_records(sk, -1);
 	release_sock(sk);
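
Continuing the userspace model from above (same tx_bitmask and bit
definitions), the worker side mirrors tx_work_handler() after this
patch, and the scheduling side is assumed, as a sketch, to follow the
usual test_and_set_bit()-before-schedule pattern; the kernel calls
remain comments and the model_* helpers are invented stand-ins:

  /* Stand-ins for the kernel's test_bit()/test_and_set_bit()/
   * test_and_clear_bit(), built from C11 atomics.
   */
  static bool model_test_bit(int nr, atomic_ulong *mask)
  {
          return atomic_load(mask) & (1UL << nr);
  }

  static bool model_test_and_set_bit(int nr, atomic_ulong *mask)
  {
          return atomic_fetch_or(mask, 1UL << nr) & (1UL << nr);
  }

  static bool model_test_and_clear_bit(int nr, atomic_ulong *mask)
  {
          return atomic_fetch_and(mask, ~(1UL << nr)) & (1UL << nr);
  }

  /* Worker: if CLOSING is observed, return WITHOUT clearing
   * SCHEDULED; this is what closes the lost-cancel race.
   */
  static void model_tx_work_handler(void)
  {
          if (model_test_bit(BIT_TX_CLOSING, &tx_bitmask))
                  return;
          if (!model_test_and_clear_bit(BIT_TX_SCHEDULED, &tx_bitmask))
                  return;
          /* lock_sock(sk); tls_tx_records(sk, -1); release_sock(sk); */
  }

  /* Scheduler: after close sets SCHEDULED and nothing clears it
   * again, this test fails forever, so no new work is queued.
   */
  static bool model_schedule_tx(void)
  {
          if (model_test_and_set_bit(BIT_TX_SCHEDULED, &tx_bitmask))
                  return false;
          /* schedule_delayed_work(&ctx->tx_work.work, 1); */
          return true;
  }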