net/tls: add asynchronous device RX resync (TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC)
[linux-2.6-microblaze.git] / net / tls / tls_device.c
index 0e55f83..18fa606 100644 (file)
@@ -690,15 +690,55 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
 }
 
+/* Handle a NIC-driven RX resync request in the asynchronous
+ * (TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC) flow.  Invoked for each new
+ * record header while a request word is pending in resync_async->req.
+ *
+ * @resync_async: per-connection async resync state: a small log of
+ *                candidate record-header TCP seqs plus the atomic
+ *                request word written by the driver
+ * @resync_req:   snapshot of resync_async->req; from the shifts below,
+ *                bit 0 carries RESYNC_REQ_ASYNC, bits 32-63 the
+ *                requested seq, and bits 16-31 a length bounding the
+ *                range of seqs worth logging
+ *                (NOTE(review): bit layout inferred from this decoding
+ *                 only - confirm against the driver that fills it)
+ * @seq:          in: TCP seq of the current record header;
+ *                out: on a log hit, rewritten to the matched seq
+ *
+ * Return: true when the caller should perform the device resync at *seq.
+ */
+static bool
+tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
+                          s64 resync_req, u32 *seq)
+{
+       u32 is_async = resync_req & RESYNC_REQ_ASYNC;
+       u32 req_seq = resync_req >> 32;
+       u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
+
+       if (is_async) {
+               /* asynchronous stage: log all headers seq such that
+                * req_seq <= seq <= end_seq, and wait for real resync request
+                */
+               if (between(*seq, req_seq, req_end) &&
+                   resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
+                       resync_async->log[resync_async->loglen++] = *seq;
+
+               /* never resync during the async (logging) stage */
+               return false;
+       }
+
+       /* synchronous stage: check against the logged entries and
+        * proceed to check the next entries if no match was found
+        */
+       while (resync_async->loglen) {
+               /* walk the log newest-first; the cmpxchg only clears the
+                * request word if the driver has not re-armed it since
+                * our snapshot, so a concurrent update aborts the resync
+                */
+               if (req_seq == resync_async->log[resync_async->loglen - 1] &&
+                   atomic64_try_cmpxchg(&resync_async->req,
+                                        &resync_req, 0)) {
+                       /* hit: drop the whole log and resync at req_seq */
+                       resync_async->loglen = 0;
+                       *seq = req_seq;
+                       return true;
+               }
+               /* miss: discard this entry and try the next-older one */
+               resync_async->loglen--;
+       }
+
+       /* no logged entry matched; last chance - compare against the
+        * current header seq before consuming the request word
+        */
+       if (req_seq == *seq &&
+           atomic64_try_cmpxchg(&resync_async->req,
+                                &resync_req, 0))
+               return true;
+
+       return false;
+}
+
 void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_offload_context_rx *rx_ctx;
-       bool is_req_pending, is_force_resync;
        u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+       u32 sock_data, is_req_pending;
        struct tls_prot_info *prot;
        s64 resync_req;
-       u32 sock_data;
        u32 req_seq;
 
        if (tls_ctx->rx_conf != TLS_HW)
@@ -713,11 +753,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
                resync_req = atomic64_read(&rx_ctx->resync_req);
                req_seq = resync_req >> 32;
                seq += TLS_HEADER_SIZE - 1;
-               is_req_pending = resync_req & RESYNC_REQ;
-               is_force_resync = resync_req & RESYNC_REQ_FORCE;
+               is_req_pending = resync_req;
 
-               if (likely(!is_req_pending) ||
-                   (!is_force_resync && req_seq != seq) ||
+               if (likely(!is_req_pending) || req_seq != seq ||
                    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
                        return;
                break;
@@ -739,6 +777,16 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
                seq += rcd_len;
                tls_bigint_increment(rcd_sn, prot->rec_seq_size);
                break;
+       case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
+               resync_req = atomic64_read(&rx_ctx->resync_async->req);
+               is_req_pending = resync_req;
+               if (likely(!is_req_pending))
+                       return;
+
+               if (!tls_device_rx_resync_async(rx_ctx->resync_async,
+                                               resync_req, &seq))
+                       return;
+               break;
        }
 
        tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);