Async crypto currently benefits from the fact that we decrypt
in place. When we allow input and output to be different skbs,
we will have to hang onto the input while we move to the next
record. Clone the inputs and keep them on a list.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
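
For context, the pattern amounts to cloning each input skb before its async
decrypt is submitted and purging the hold queue once every pending request
has completed. Below is a minimal sketch of that pattern; the skb helpers are
the real kernel API, but demo_rx_ctx, demo_hold_input() and
demo_release_inputs() are illustrative names only, not part of this patch,
and a single serialized reader is assumed (which is why the unlocked
__skb_* queue helpers suffice).

#include <linux/skbuff.h>
#include <net/sock.h>

struct demo_rx_ctx {
	struct sk_buff_head async_hold;	/* inputs pinned for async crypto */
};

static void demo_init(struct demo_rx_ctx *ctx)
{
	skb_queue_head_init(&ctx->async_hold);
}

/* Pin the input record: skb_clone() shares the payload, so the data
 * stays alive after the caller moves on to the next record.
 */
static int demo_hold_input(struct sock *sk, struct sk_buff *skb,
			   struct demo_rx_ctx *ctx)
{
	struct sk_buff *clone;

	clone = skb_clone(skb, sk->sk_allocation);
	if (!clone)
		return -ENOMEM;
	__skb_queue_tail(&ctx->async_hold, clone);
	return 0;
}

/* Drop every held input once all pending async decrypts have completed. */
static void demo_release_inputs(struct demo_rx_ctx *ctx)
{
	__skb_queue_purge(&ctx->async_hold);
}
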
atomic_t decrypt_pending;
/* protect crypto_wait with decrypt_pending*/
spinlock_t decrypt_compl_lock;
+ struct sk_buff_head async_hold;
struct wait_queue_head wq;
};
obj-$(CONFIG_TLS) += tls.o
-tls-y := tls_main.o tls_sw.o tls_proc.o trace.o
+tls-y := tls_main.o tls_sw.o tls_proc.o trace.o tls_strp.o
tls-$(CONFIG_TLS_TOE) += tls_toe.o
tls-$(CONFIG_TLS_DEVICE) += tls_device.o tls_device_fallback.o
struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info);
+int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *dst);
+
static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/skbuff.h>
+
+#include "tls.h"
+
+int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *dst)
+{
+ struct sk_buff *clone;
+
+ clone = skb_clone(skb, sk->sk_allocation);
+ if (!clone)
+ return -ENOMEM;
+ __skb_queue_tail(dst, clone);
+ return 0;
+}
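
Worth noting: skb_clone() duplicates only the skb metadata and takes a
reference on the shared data, so holding each input until its decrypt
completes costs an extra skb header per in-flight record rather than a copy
of the payload.
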
goto exit_free_pages;
darg->skb = tls_strp_msg(ctx);
- if (darg->async)
- return 0;
+
+ if (unlikely(darg->async)) {
+ err = tls_strp_msg_hold(sk, skb, &ctx->async_hold);
+ if (err)
+ __skb_queue_tail(&ctx->async_hold, darg->skb);
+ return err;
+ }
if (prot->tail_size)
darg->tail = dctx->tail;
reinit_completion(&ctx->async_wait.completion);
pending = atomic_read(&ctx->decrypt_pending);
spin_unlock_bh(&ctx->decrypt_compl_lock);
+ ret = 0;
+ if (pending)
ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
- if (ret) {
- if (err >= 0 || err == -EINPROGRESS)
- err = ret;
- decrypted = 0;
- goto end;
- }
+ __skb_queue_purge(&ctx->async_hold);
+
+ if (ret) {
+ if (err >= 0 || err == -EINPROGRESS)
+ err = ret;
+ decrypted = 0;
+ goto end;
+ }
/* Drain records from the rx_list & copy if required */
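
The hold queue is purged only after crypto_wait_req() has observed all
pending completions, i.e. once no in-flight decrypt can still be reading
from the cloned inputs.
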
crypto_info = &ctx->crypto_recv.info;
cctx = &ctx->rx;
skb_queue_head_init(&sw_ctx_rx->rx_list);
+ skb_queue_head_init(&sw_ctx_rx->async_hold);
aead = &sw_ctx_rx->aead_recv;
}