static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
- long timeo)
+ bool released)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ long timeo;
+
+ timeo = sock_rcvtimeo(sk, nonblock);
- while (!ctx->recv_pkt) {
+ while (!tls_strp_msg_ready(ctx)) {
if (!sk_psock_queue_empty(psock))
return 0;
		if (sk->sk_err)
			return sock_error(sk);
if (!skb_queue_empty(&sk->sk_receive_queue)) {
- __strp_unpause(&ctx->strp);
- if (ctx->recv_pkt)
+ tls_strp_check_rcv(&ctx->strp);
+ if (tls_strp_msg_ready(ctx))
break;
}
if (sock_flag(sk, SOCK_DONE))
return 0;
- if (nonblock || !timeo)
+ if (!timeo)
return -EAGAIN;
+ released = true;
add_wait_queue(sk_sleep(sk), &wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk_wait_event(sk, &timeo,
- ctx->recv_pkt || !sk_psock_queue_empty(psock),
+ tls_strp_msg_ready(ctx) ||
+ !sk_psock_queue_empty(psock),
&wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
remove_wait_queue(sk_sleep(sk), &wait);
		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
}
+ tls_strp_msg_load(&ctx->strp, released);
+
return 1;
}
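A sketch, not part of the patch, of how callers drive the reworked waiter;
"released" records whether the socket lock was dropped since the strparser
last ran, so tls_strp_msg_load() can tell whether the parsed record needs
re-validation:

	int err;

	err = tls_rx_rec_wait(sk, psock, nonblock, released);
	if (err <= 0)
		return err;	/* 0: no TLS record, <0: socket error */
	skb = tls_strp_msg(ctx);	/* the record the waiter loaded */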
return rc;
}
+static struct sk_buff *
+tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
+ unsigned int full_len)
+{
+ struct strp_msg *clr_rxm;
+ struct sk_buff *clr_skb;
+ int err;
+
+ clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
+ &err, sk->sk_allocation);
+ if (!clr_skb)
+ return NULL;
+
+ skb_copy_header(clr_skb, skb);
+ clr_skb->len = full_len;
+ clr_skb->data_len = full_len;
+
+ clr_rxm = strp_msg(clr_skb);
+ clr_rxm->offset = 0;
+
+ return clr_skb;
+}
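The helper above allocates an skb with no linear area and full_len bytes in
page frags (TLS_PAGE_ORDER-sized allocations), copies the input skb's header
state, and zeroes the strp offset so decryption can start at frag 0. A sketch
of the intended use, mirroring tls_decrypt_sg() below:

	clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
	if (!clear_skb)
		return -ENOMEM;
	/* one sg entry for the AAD plus one per page frag */
	n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;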
+
/* Decrypt handlers
*
- * tls_decrypt_sg() and tls_decrypt_device() are decrypt handlers.
+ * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
* | Input | Output
* -------------------------------------------------------------------
* zc | Zero-copy decrypt allowed | Zero-copy performed
* async | Async decrypt allowed | Async crypto used / in progress
* skb | * | Output skb
+ *
+ * If ZC decryption was performed darg.skb will point to the input skb.
*/
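A minimal sketch (hypothetical caller, not from this patch) of the in/out
contract tabulated above:

	struct tls_decrypt_arg darg;
	int err;

	memset(&darg.inargs, 0, sizeof(darg.inargs));
	darg.zc = true;		/* in: zero-copy is allowed */
	darg.async = false;	/* in: synchronous crypto only */

	err = tls_decrypt_sw(sk, tls_ctx, msg, &darg);
	if (err < 0)
		return err;
	/* out: darg.zc says whether ZC actually happened; darg.skb is
	 * the output record, equal to the input skb when ZC was done.
	 */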
/* This function decrypts the input skb into either out_iov or out_sg
struct tls_prot_info *prot = &tls_ctx->prot_info;
int n_sgin, n_sgout, aead_size, err, pages = 0;
struct sk_buff *skb = tls_strp_msg(ctx);
- struct strp_msg *rxm = strp_msg(skb);
- struct tls_msg *tlm = tls_msg(skb);
+ const struct strp_msg *rxm = strp_msg(skb);
+ const struct tls_msg *tlm = tls_msg(skb);
struct aead_request *aead_req;
- struct sk_buff *unused;
struct scatterlist *sgin = NULL;
struct scatterlist *sgout = NULL;
const int data_len = rxm->full_len - prot->overhead_size;
int tail_pages = !!prot->tail_size;
struct tls_decrypt_ctx *dctx;
+ struct sk_buff *clear_skb;
int iv_offset = 0;
u8 *mem;
+ n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
+ rxm->full_len - prot->prepend_size);
+ if (n_sgin < 1)
+ return n_sgin ?: -EBADMSG;
+
if (darg->zc && (out_iov || out_sg)) {
+ clear_skb = NULL;
+
if (out_iov)
n_sgout = 1 + tail_pages +
iov_iter_npages_cap(out_iov, INT_MAX, data_len);
else
n_sgout = sg_nents(out_sg);
- n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
- rxm->full_len - prot->prepend_size);
} else {
- n_sgout = 0;
darg->zc = false;
- n_sgin = skb_cow_data(skb, 0, &unused);
- }
- if (n_sgin < 1)
- return -EBADMSG;
+ clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
+ if (!clear_skb)
+ return -ENOMEM;
+
+ n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
+ }
/* Increment to accommodate AAD */
n_sgin = n_sgin + 1;
aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
sk->sk_allocation);
- if (!mem)
- return -ENOMEM;
+ if (!mem) {
+ err = -ENOMEM;
+ goto exit_free_skb;
+ }
/* Segment the allocated memory */
aead_req = (struct aead_request *)mem;
if (err < 0)
goto exit_free;
- if (n_sgout) {
- if (out_iov) {
- sg_init_table(sgout, n_sgout);
- sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
+ if (clear_skb) {
+ sg_init_table(sgout, n_sgout);
+ sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
- err = tls_setup_from_iter(out_iov, data_len,
- &pages, &sgout[1],
- (n_sgout - 1 - tail_pages));
- if (err < 0)
- goto fallback_to_reg_recv;
+ err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
+ data_len + prot->tail_size);
+ if (err < 0)
+ goto exit_free;
+ } else if (out_iov) {
+ sg_init_table(sgout, n_sgout);
+ sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
- if (prot->tail_size) {
- sg_unmark_end(&sgout[pages]);
- sg_set_buf(&sgout[pages + 1], &dctx->tail,
- prot->tail_size);
- sg_mark_end(&sgout[pages + 1]);
- }
- } else if (out_sg) {
- memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
- } else {
- goto fallback_to_reg_recv;
+ err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
+ (n_sgout - 1 - tail_pages));
+ if (err < 0)
+ goto exit_free_pages;
+
+ if (prot->tail_size) {
+ sg_unmark_end(&sgout[pages]);
+ sg_set_buf(&sgout[pages + 1], &dctx->tail,
+ prot->tail_size);
+ sg_mark_end(&sgout[pages + 1]);
}
- } else {
-fallback_to_reg_recv:
- sgout = sgin;
- pages = 0;
- darg->zc = false;
+ } else if (out_sg) {
+ memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
}
/* Prepare and submit AEAD request */
if (err)
goto exit_free_pages;
- darg->skb = tls_strp_msg(ctx);
+ darg->skb = clear_skb ?: tls_strp_msg(ctx);
+ clear_skb = NULL;
if (unlikely(darg->async)) {
- err = tls_strp_msg_hold(sk, skb, &ctx->async_hold);
+ err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
if (err)
__skb_queue_tail(&ctx->async_hold, darg->skb);
return err;
put_page(sg_page(&sgout[pages]));
exit_free:
kfree(mem);
+exit_free_skb:
+ consume_skb(clear_skb);
return err;
}
static int
-tls_decrypt_device(struct sock *sk, struct tls_context *tls_ctx,
- struct tls_decrypt_arg *darg)
+tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
+ struct msghdr *msg, struct tls_decrypt_arg *darg)
{
- struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- int err;
-
- if (tls_ctx->rx_conf != TLS_HW)
- return 0;
-
- err = tls_device_decrypted(sk, tls_ctx);
- if (err <= 0)
- return err;
-
- darg->zc = false;
- darg->async = false;
- darg->skb = tls_strp_msg(ctx);
- ctx->recv_pkt = NULL;
- return 1;
-}
-
-static int tls_rx_one_record(struct sock *sk, struct iov_iter *dest,
- struct tls_decrypt_arg *darg)
-{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct strp_msg *rxm;
int pad, err;
- err = tls_decrypt_device(sk, tls_ctx, darg);
- if (err < 0)
- return err;
- if (err)
- goto decrypt_done;
-
- err = tls_decrypt_sg(sk, dest, NULL, darg);
+ err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
if (err < 0) {
if (err == -EBADMSG)
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
return err;
}
- if (darg->async)
- goto decrypt_done;
+ /* keep going even for ->async, the code below is TLS 1.3 */
+
/* If opportunistic TLS 1.3 ZC failed retry without ZC */
if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
darg->tail != TLS_RECORD_TYPE_DATA)) {
if (!darg->tail)
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
- return tls_rx_one_record(sk, dest, darg);
+ return tls_decrypt_sw(sk, tls_ctx, msg, darg);
}
-decrypt_done:
- if (darg->skb == ctx->recv_pkt)
- ctx->recv_pkt = NULL;
-
pad = tls_padding_length(prot, darg->skb, darg);
if (pad < 0) {
- consume_skb(darg->skb);
+ if (darg->skb != tls_strp_msg(ctx))
+ consume_skb(darg->skb);
return pad;
}
rxm = strp_msg(darg->skb);
rxm->full_len -= pad;
+
+ return 0;
+}
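Why the retry in tls_decrypt_sw() exists: in TLS 1.3 the true record type is
the last non-zero byte of the inner plaintext, which under ZC is decrypted
into darg->tail. A sketch of the framing (standard TLS 1.3, not
patch-specific):

	/* TLS 1.3 inner plaintext:
	 *
	 *	application data || ContentType || zero padding
	 *				^
	 *		decrypted into darg->tail under ZC
	 *
	 * If ContentType != 23 (application_data), the payload must not
	 * be left in the user buffer, so decrypt again without ZC.
	 */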
+
+static int
+tls_decrypt_device(struct sock *sk, struct msghdr *msg,
+ struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
+{
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct strp_msg *rxm;
+ int pad, err;
+
+ if (tls_ctx->rx_conf != TLS_HW)
+ return 0;
+
+ err = tls_device_decrypted(sk, tls_ctx);
+ if (err <= 0)
+ return err;
+
+ pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
+ if (pad < 0)
+ return pad;
+
+ darg->async = false;
+ darg->skb = tls_strp_msg(ctx);
+ /* ->zc downgrade check, in case TLS 1.3 gets here */
+ darg->zc &= !(prot->version == TLS_1_3_VERSION &&
+ tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);
+
+ rxm = strp_msg(darg->skb);
+ rxm->full_len -= pad;
+
+ if (!darg->zc) {
+ /* Non-ZC case needs a real skb */
+ darg->skb = tls_strp_msg_detach(ctx);
+ if (!darg->skb)
+ return -ENOMEM;
+ } else {
+ unsigned int off, len;
+
+ /* In ZC case nobody cares about the output skb.
+ * Just copy the data here. Note the skb is not fully trimmed.
+ */
+ off = rxm->offset + prot->prepend_size;
+ len = rxm->full_len - prot->overhead_size;
+
+ err = skb_copy_datagram_msg(darg->skb, off, msg, len);
+ if (err)
+ return err;
+ }
+ return 1;
+}
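A worked example for the ZC copy above, assuming TLS 1.2 with AES-GCM-128
(no tail byte, no padding):

	/* prot->prepend_size  = 5 (header) + 8 (explicit nonce) = 13
	 * prot->overhead_size = 13 + 16 (tag)                   = 29
	 *
	 * so a record with rxm->full_len == 100 copies
	 *	off = rxm->offset + 13
	 *	len = 100 - 29 = 71 plaintext bytes
	 * straight into msg via skb_copy_datagram_msg().
	 */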
+
+static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
+ struct tls_decrypt_arg *darg)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct strp_msg *rxm;
+ int err;
+
+ err = tls_decrypt_device(sk, msg, tls_ctx, darg);
+ if (!err)
+ err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
+ if (err < 0)
+ return err;
+
+ rxm = strp_msg(darg->skb);
rxm->offset += prot->prepend_size;
rxm->full_len -= prot->overhead_size;
tls_advance_record_sn(sk, prot, &tls_ctx->rx);
static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
- consume_skb(ctx->recv_pkt);
- ctx->recv_pkt = NULL;
- __strp_unpause(&ctx->strp);
+ tls_strp_msg_done(&ctx->strp);
}
/* This function traverses the rx_list in tls receive context to copy the
u8 *control,
size_t skip,
size_t len,
- bool zc,
bool is_peek)
{
struct sk_buff *skb = skb_peek(&ctx->rx_list);
if (err <= 0)
goto out;
- if (!zc || (rxm->full_len - skip) > len) {
- err = skb_copy_datagram_msg(skb, rxm->offset + skip,
- msg, chunk);
- if (err < 0)
- goto out;
- }
+ err = skb_copy_datagram_msg(skb, rxm->offset + skip,
+ msg, chunk);
+ if (err < 0)
+ goto out;
len = len - chunk;
copied = copied + chunk;
return copied ? : err;
}
-static void
+static bool
tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
size_t len_left, size_t decrypted, ssize_t done,
size_t *flushed_at)
size_t max_rec;
if (len_left <= decrypted)
- return;
+ return false;
max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
- return;
+ return false;
*flushed_at = done;
- sk_flush_backlog(sk);
+ return sk_flush_backlog(sk);
}
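The bool return propagates what sk_flush_backlog() reports: whether the
backlog was actually processed, which implies socket ownership was briefly
given up. Callers fold it into the "released" state handed back to
tls_rx_rec_wait(), roughly:

	released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
					  decrypted + copied, &flushed_at);
	/* ... next record ... */
	err = tls_rx_rec_wait(sk, psock, nonblock, released);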
-static long tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
- bool nonblock)
+static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+ bool nonblock)
{
long timeo;
+ int err;
lock_sock(sk);
!READ_ONCE(ctx->reader_present), &wait);
remove_wait_queue(&ctx->wq, &wait);
- if (!timeo)
- return -EAGAIN;
- if (signal_pending(current))
- return sock_intr_errno(timeo);
+ if (timeo <= 0) {
+ err = -EAGAIN;
+ goto err_unlock;
+ }
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ goto err_unlock;
+ }
}
WRITE_ONCE(ctx->reader_present, 1);
- return timeo;
+ return 0;
+
+err_unlock:
+ release_sock(sk);
+ return err;
}
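With the zero-or-errno convention the helper also releases the socket on
failure, so callers reduce to the pattern below (as recvmsg and splice_read
do later in this patch):

	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
	if (err < 0)
		return err;	/* socket already released on error */
	/* ... read path ... */
	tls_rx_reader_unlock(sk, ctx);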
static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
+ ssize_t decrypted = 0, async_copy_bytes = 0;
struct sk_psock *psock;
unsigned char control = 0;
- ssize_t decrypted = 0;
size_t flushed_at = 0;
struct strp_msg *rxm;
struct tls_msg *tlm;
- struct sk_buff *skb;
ssize_t copied = 0;
bool async = false;
- int target, err = 0;
- long timeo;
+ int target, err;
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool is_peek = flags & MSG_PEEK;
+ bool released = true;
bool bpf_strp_enabled;
bool zc_capable;
return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
psock = sk_psock_get(sk);
- timeo = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
- if (timeo < 0)
- return timeo;
+ err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
+ if (err < 0)
+ return err;
bpf_strp_enabled = sk_psock_strp_enabled(psock);
/* If crypto failed the connection is broken */
goto end;
/* Process pending decrypted records. It must be non-zero-copy */
- err = process_rx_list(ctx, msg, &control, 0, len, false, is_peek);
+ err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
if (err < 0)
goto end;
zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
ctx->zc_capable;
decrypted = 0;
- while (len && (decrypted + copied < target || ctx->recv_pkt)) {
+ while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
struct tls_decrypt_arg darg;
int to_decrypt, chunk;
- err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT, timeo);
+ err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
+ released);
if (err <= 0) {
if (psock) {
chunk = sk_msg_recvmsg(sk, psock, msg, len,
memset(&darg.inargs, 0, sizeof(darg.inargs));
- rxm = strp_msg(ctx->recv_pkt);
- tlm = tls_msg(ctx->recv_pkt);
+ rxm = strp_msg(tls_strp_msg(ctx));
+ tlm = tls_msg(tls_strp_msg(ctx));
to_decrypt = rxm->full_len - prot->overhead_size;
else
darg.async = false;
- err = tls_rx_one_record(sk, &msg->msg_iter, &darg);
+ err = tls_rx_one_record(sk, msg, &darg);
if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto recv_end;
}
- skb = darg.skb;
- rxm = strp_msg(skb);
- tlm = tls_msg(skb);
-
async |= darg.async;
		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
		 */
- err = tls_record_content_type(msg, tlm, &control);
+ err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
if (err <= 0) {
+ DEBUG_NET_WARN_ON_ONCE(darg.zc);
tls_rx_rec_done(ctx);
put_on_rx_list_err:
- __skb_queue_tail(&ctx->rx_list, skb);
+ __skb_queue_tail(&ctx->rx_list, darg.skb);
goto recv_end;
}
/* periodically flush backlog, and feed strparser */
- tls_read_flush_backlog(sk, prot, len, to_decrypt,
- decrypted + copied, &flushed_at);
+ released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
+ decrypted + copied,
+ &flushed_at);
/* TLS 1.3 may have updated the length by more than overhead */
+ rxm = strp_msg(darg.skb);
chunk = rxm->full_len;
tls_rx_rec_done(ctx);
- if (async) {
- /* TLS 1.2-only, to_decrypt must be text length */
- chunk = min_t(int, to_decrypt, len);
-put_on_rx_list:
- decrypted += chunk;
- len -= chunk;
- __skb_queue_tail(&ctx->rx_list, skb);
- continue;
- }
-
if (!darg.zc) {
bool partially_consumed = chunk > len;
+ struct sk_buff *skb = darg.skb;
+
+ DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
+
+ if (async) {
+ /* TLS 1.2-only, to_decrypt must be text len */
+ chunk = min_t(int, to_decrypt, len);
+ async_copy_bytes += chunk;
+put_on_rx_list:
+ decrypted += chunk;
+ len -= chunk;
+ __skb_queue_tail(&ctx->rx_list, skb);
+ continue;
+ }
if (bpf_strp_enabled) {
+ released = true;
err = sk_psock_tls_strp_read(psock, skb);
if (err != __SK_PASS) {
rxm->offset = rxm->offset + rxm->full_len;
					rxm->full_len = 0;
					if (err == __SK_DROP)
						consume_skb(skb);
					continue;
				}
			}

			if (partially_consumed) {
				rxm->offset += chunk;
				rxm->full_len -= chunk;
				goto put_on_rx_list;
			}
+
+ consume_skb(skb);
}
decrypted += chunk;
len -= chunk;
- consume_skb(skb);
-
/* Return full control message to userspace before trying
* to parse another message type
*/
/* Drain records from the rx_list & copy if required */
if (is_peek || is_kvec)
err = process_rx_list(ctx, msg, &control, copied,
- decrypted, false, is_peek);
+ decrypted, is_peek);
else
err = process_rx_list(ctx, msg, &control, 0,
- decrypted, true, is_peek);
+ async_copy_bytes, is_peek);
decrypted = max(err, 0);
}
struct tls_msg *tlm;
struct sk_buff *skb;
ssize_t copied = 0;
- int err = 0;
- long timeo;
int chunk;
+ int err;
- timeo = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
- if (timeo < 0)
- return timeo;
+ err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
+ if (err < 0)
+ return err;
if (!skb_queue_empty(&ctx->rx_list)) {
skb = __skb_dequeue(&ctx->rx_list);
struct tls_decrypt_arg darg;
err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
- timeo);
+ true);
if (err <= 0)
goto splice_read_end;
ingress_empty = list_empty(&psock->ingress_msg);
rcu_read_unlock();
- return !ingress_empty || ctx->recv_pkt ||
+ return !ingress_empty || tls_strp_msg_ready(ctx) ||
!skb_queue_empty(&ctx->rx_list);
}
-static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
+int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
{
struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
- struct strp_msg *rxm = strp_msg(skb);
- struct tls_msg *tlm = tls_msg(skb);
size_t cipher_overhead;
size_t data_len = 0;
int ret;
/* Verify that we have a full TLS header, or wait for more data */
- if (rxm->offset + prot->prepend_size > skb->len)
+ if (strp->stm.offset + prot->prepend_size > skb->len)
return 0;
/* Sanity-check size of on-stack buffer. */
}
/* Linearize header to local buffer */
- ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
+ ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
if (ret < 0)
goto read_failure;
- tlm->control = header[0];
+ strp->mark = header[0];
data_len = ((header[4] & 0xFF) | (header[3] << 8));
}
tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
- TCP_SKB_CB(skb)->seq + rxm->offset);
+ TCP_SKB_CB(skb)->seq + strp->stm.offset);
return data_len + TLS_HEADER_SIZE;
read_failure:
return ret;
}
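A worked example of the header math above (standard TLS record framing): an
application-data record carrying 256 ciphertext bytes starts 17 03 03 01 00,
so:

	/* header[0] = 0x17		record type, stashed in strp->mark
	 * header[1..2] = 0x0303	legacy version
	 * header[3] = 0x01, header[4] = 0x00
	 *	data_len = (0x00 & 0xFF) | (0x01 << 8) = 256
	 * and the function returns 256 + TLS_HEADER_SIZE (5) = 261,
	 * telling the strparser where this record ends.
	 */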
-static void tls_queue(struct strparser *strp, struct sk_buff *skb)
+void tls_rx_msg_ready(struct tls_strparser *strp)
{
- struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
- struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-
- ctx->recv_pkt = skb;
- strp_pause(strp);
+	struct tls_sw_context_rx *ctx;
+
+	ctx = container_of(strp, struct tls_sw_context_rx, strp);
ctx->saved_data_ready(strp->sk);
}
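Unlike the old tls_queue()/strp_pause() pair, the custom strparser keeps the
parsed record itself and pausing is implicit, so the ready callback only has
to wake the reader. The resulting wake-up chain, sketched:

	/* TCP sk_data_ready
	 *   -> tls_data_ready()
	 *     -> tls_strp_data_ready()	parse sk_receive_queue
	 *       -> tls_rx_msg_ready()	a full record is available
	 *         -> ctx->saved_data_ready(sk)	wake tls_rx_rec_wait()
	 */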
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct sk_psock *psock;
- strp_data_ready(&ctx->strp);
+ tls_strp_data_ready(&ctx->strp);
psock = sk_psock_get(sk);
if (psock) {
kfree(tls_ctx->rx.iv);
if (ctx->aead_recv) {
- kfree_skb(ctx->recv_pkt);
- ctx->recv_pkt = NULL;
__skb_queue_purge(&ctx->rx_list);
crypto_free_aead(ctx->aead_recv);
- strp_stop(&ctx->strp);
+ tls_strp_stop(&ctx->strp);
/* If tls_sw_strparser_arm() was not called (cleanup paths)
- * we still want to strp_stop(), but sk->sk_data_ready was
+ * we still want to tls_strp_stop(), but sk->sk_data_ready was
* never swapped.
*/
if (ctx->saved_data_ready) {
{
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- strp_done(&ctx->strp);
+ tls_strp_done(&ctx->strp);
}
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
rx_ctx->saved_data_ready = sk->sk_data_ready;
sk->sk_data_ready = tls_data_ready;
write_unlock_bh(&sk->sk_callback_lock);
-
- strp_check_rcv(&rx_ctx->strp);
}
void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
struct tls_sw_context_rx *sw_ctx_rx = NULL;
struct cipher_context *cctx;
struct crypto_aead **aead;
- struct strp_callbacks cb;
u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
struct crypto_tfm *tfm;
char *iv, *rec_seq, *key, *salt, *cipher_name;
crypto_info->version != TLS_1_3_VERSION &&
!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
- /* Set up strparser */
- memset(&cb, 0, sizeof(cb));
- cb.rcv_msg = tls_queue;
- cb.parse_msg = tls_read_size;
-
- strp_init(&sw_ctx_rx->strp, sk, &cb);
+ tls_strp_init(&sw_ctx_rx->strp, sk);
}
goto out;