tls: rx: only copy IV from the packet for TLS 1.2
author: Jakub Kicinski <kuba@kernel.org>
Mon, 11 Apr 2022 19:19:17 +0000 (12:19 -0700)
committer: David S. Miller <davem@davemloft.net>
Wed, 13 Apr 2022 10:45:39 +0000 (11:45 +0100)
TLS 1.3 and ChaChaPoly don't carry IV in the packet.
The code before this change would copy out iv_size
worth of whatever followed the TLS header in the packet
and then for TLS 1.3 | ChaCha overwrite that with
the sequence number. Waste of cycles especially
with TLS 1.2 being close to dead and TLS 1.3 being
the common case.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/tls/tls_sw.c

index 465d902f5bb9b368cd946df1912d9af631787254..ddbe05ec5489dd352dee832e038884339f338b43 100644 (file)
@@ -1482,20 +1482,20 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
        }
 
        /* Prepare IV */
-       err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
-                           iv + iv_offset + prot->salt_size,
-                           prot->iv_size);
-       if (err < 0) {
-               kfree(mem);
-               return err;
-       }
        if (prot->version == TLS_1_3_VERSION ||
-           prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
+           prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
                memcpy(iv + iv_offset, tls_ctx->rx.iv,
                       prot->iv_size + prot->salt_size);
-       else
+       } else {
+               err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
+                                   iv + iv_offset + prot->salt_size,
+                                   prot->iv_size);
+               if (err < 0) {
+                       kfree(mem);
+                       return err;
+               }
                memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
-
+       }
        xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
 
        /* Prepare AAD */