drivers/crypto/chelsio/chtls/chtls_main.c
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);
static DEFINE_MUTEX(cdev_list_lock);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot;
struct request_sock_ops chtls_rsk_ops;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;

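/*
 * Helpers to (un)register a notifier block on the listen notifier chain.
 * All callers serialize through notify_mutex, so a raw notifier chain is
 * sufficient here.
 */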
static void register_listen_notifier(struct notifier_block *nb)
{
        mutex_lock(&notify_mutex);
        raw_notifier_chain_register(&listen_notify_list, nb);
        mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
        mutex_lock(&notify_mutex);
        raw_notifier_chain_unregister(&listen_notify_list, nb);
        mutex_unlock(&notify_mutex);
}

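/*
 * Propagate a listen start/stop event to every registered chtls device.
 * The handler runs with notify_mutex held by the callers below, and
 * cdev_list_lock protects the walk over cdev_list.
 */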
static int listen_notify_handler(struct notifier_block *this,
                                 unsigned long event, void *data)
{
        struct chtls_dev *cdev;
        struct sock *sk;
        int ret;

        sk = data;
        ret = NOTIFY_DONE;

        switch (event) {
        case CHTLS_LISTEN_START:
        case CHTLS_LISTEN_STOP:
                mutex_lock(&cdev_list_lock);
                list_for_each_entry(cdev, &cdev_list, list) {
                        if (event == CHTLS_LISTEN_START)
                                ret = chtls_listen_start(cdev, sk);
                        else
                                chtls_listen_stop(cdev, sk);
                }
                mutex_unlock(&cdev_list_lock);
                break;
        }
        return ret;
}

static struct notifier_block listen_notifier = {
        .notifier_call = listen_notify_handler
};

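/*
 * Backlog receive for listening sockets: ordinary TCP segments (with the
 * transport header set past the network header) go to tcp_v4_do_rcv();
 * skbs queued by the driver carry their own handler in the control block
 * and are dispatched to it instead.
 */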
static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb_transport_header(skb) != skb_network_header(skb)))
                return tcp_v4_do_rcv(sk, skb);
        BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
        return 0;
}

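/*
 * Offload a listening socket: only TCP sockets are supported, and
 * loopback listeners are rejected. The CHTLS_LISTEN_START notification
 * fans the request out to every chtls device via listen_notify_handler().
 */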
static int chtls_start_listen(struct sock *sk)
{
        int err;

        if (sk->sk_protocol != IPPROTO_TCP)
                return -EPROTONOSUPPORT;

        if (sk->sk_family == PF_INET &&
            LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
                return -EADDRNOTAVAIL;

        sk->sk_backlog_rcv = listen_backlog_rcv;
        mutex_lock(&notify_mutex);
        err = raw_notifier_call_chain(&listen_notify_list,
                                      CHTLS_LISTEN_START, sk);
        mutex_unlock(&notify_mutex);
        return err;
}

static void chtls_stop_listen(struct sock *sk)
{
        if (sk->sk_protocol != IPPROTO_TCP)
                return;

        mutex_lock(&notify_mutex);
        raw_notifier_call_chain(&listen_notify_list,
                                CHTLS_LISTEN_STOP, sk);
        mutex_unlock(&notify_mutex);
}

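/*
 * tls_device ->feature() hook: report inline TLS capability if any port
 * on the adapter advertises NETIF_F_HW_TLS_RECORD.
 */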
static int chtls_inline_feature(struct tls_device *dev)
{
        struct net_device *netdev;
        struct chtls_dev *cdev;
        int i;

        cdev = to_chtls_dev(dev);

        for (i = 0; i < cdev->lldi->nports; i++) {
                netdev = cdev->ports[i];
                if (netdev->features & NETIF_F_HW_TLS_RECORD)
                        return 1;
        }
        return 0;
}

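/*
 * tls_device ->hash()/->unhash() hooks: listening sockets are handed to
 * the offload path when they enter TCP_LISTEN and released when they
 * leave it; sockets in any other state need no action here.
 */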
static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
        if (sk->sk_state == TCP_LISTEN)
                return chtls_start_listen(sk);
        return 0;
}

static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
        if (sk->sk_state == TCP_LISTEN)
                chtls_stop_listen(sk);
}

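/*
 * Register this adapter with the TLS core as an inline-TLS capable
 * device. The device name is "chtls" concatenated with the name of the
 * adapter's first port.
 */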
static void chtls_register_dev(struct chtls_dev *cdev)
{
        struct tls_device *tlsdev = &cdev->tlsdev;

        strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
        strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
                TLS_DEVICE_NAME_MAX);
        tlsdev->feature = chtls_inline_feature;
        tlsdev->hash = chtls_create_hash;
        tlsdev->unhash = chtls_destroy_hash;
        tls_register_device(&cdev->tlsdev);
}

static void chtls_unregister_dev(struct chtls_dev *cdev)
{
        tls_unregister_device(&cdev->tlsdev);
}

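/*
 * Work item that drains the per-device deferred-skb queue. The queue
 * lock is dropped around each handler invocation, so the handler runs
 * without deferq.lock held.
 */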
static void process_deferq(struct work_struct *task_param)
{
        struct chtls_dev *cdev = container_of(task_param,
                                struct chtls_dev, deferq_task);
        struct sk_buff *skb;

        spin_lock_bh(&cdev->deferq.lock);
        while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
                spin_unlock_bh(&cdev->deferq.lock);
                DEFERRED_SKB_CB(skb)->handler(cdev, skb);
                spin_lock_bh(&cdev->deferq.lock);
        }
        spin_unlock_bh(&cdev->deferq.lock);
}

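/*
 * Preallocate cdev->askb: a zeroed skb sized for a bare TCP header with
 * the transport header reset, kept around for reuse by the rest of the
 * driver.
 */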
static int chtls_get_skb(struct chtls_dev *cdev)
{
        cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
        if (!cdev->askb)
                return -ENOMEM;

        skb_put(cdev->askb, sizeof(struct tcphdr));
        skb_reset_transport_header(cdev->askb);
        memset(cdev->askb->data, 0, cdev->askb->len);
        return 0;
}

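/*
 * cxgb4 ULD ->add() callback: allocate and initialize a chtls_dev for
 * the adapter, snapshot the LLD info, prime the per-bin response-queue
 * skb cache, set up the deferred-work queue and the hardware TID idr,
 * and link the device onto the global cdev_list.
 */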
static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
        struct cxgb4_lld_info *lldi;
        struct chtls_dev *cdev;
        int i, j;

        cdev = kzalloc(sizeof(*cdev) + info->nports *
                      (sizeof(struct net_device *)), GFP_KERNEL);
        if (!cdev)
                goto out;

        lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
        if (!lldi)
                goto out_lldi;

        if (chtls_get_skb(cdev))
                goto out_skb;

        *lldi = *info;
        cdev->lldi = lldi;
        cdev->pdev = lldi->pdev;
        cdev->tids = lldi->tids;
        cdev->ports = lldi->ports;
        cdev->mtus = lldi->mtus;
        cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
                        << FW_VIID_PFN_S;

        for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
                unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

                cdev->rspq_skb_cache[i] = __alloc_skb(size,
                                                      gfp_any(), 0,
                                                      lldi->nodeid);
                if (unlikely(!cdev->rspq_skb_cache[i]))
                        goto out_rspq_skb;
        }

        idr_init(&cdev->hwtid_idr);
        INIT_WORK(&cdev->deferq_task, process_deferq);
        spin_lock_init(&cdev->listen_lock);
        spin_lock_init(&cdev->idr_lock);
        cdev->send_page_order = min_t(uint, get_order(32768),
                                      send_page_order);
        cdev->max_host_sndbuf = 48 * 1024;

        if (lldi->vr->key.size)
                if (chtls_init_kmap(cdev, lldi))
                        goto out_rspq_skb;

        mutex_lock(&cdev_mutex);
        list_add_tail(&cdev->list, &cdev_list);
        mutex_unlock(&cdev_mutex);

        return cdev;
out_rspq_skb:
        for (j = 0; j < i; j++)
                kfree_skb(cdev->rspq_skb_cache[j]);
        kfree_skb(cdev->askb);
out_skb:
        kfree(lldi);
out_lldi:
        kfree(cdev);
out:
        return NULL;
}

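/*
 * Tear down one chtls device: unregister it from the TLS core, release
 * the key map, the TID idr, the cached response-queue skbs, the copied
 * LLD info and the preallocated askb, then free the device itself.
 */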
static void chtls_free_uld(struct chtls_dev *cdev)
{
        int i;

        chtls_unregister_dev(cdev);
        kvfree(cdev->kmap.addr);
        idr_destroy(&cdev->hwtid_idr);
        for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
                kfree_skb(cdev->rspq_skb_cache[i]);
        kfree(cdev->lldi);
        if (cdev->askb)
                kfree_skb(cdev->askb);
        kfree(cdev);
}

static void chtls_free_all_uld(void)
{
        struct chtls_dev *cdev, *tmp;

        mutex_lock(&cdev_mutex);
        list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
                chtls_free_uld(cdev);
        mutex_unlock(&cdev_mutex);
}

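/*
 * cxgb4 ULD ->state_change() callback: register with the TLS core once
 * the adapter is up, and unlink and free the device when the adapter is
 * detached.
 */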
static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
        struct chtls_dev *cdev = handle;

        switch (new_state) {
        case CXGB4_STATE_UP:
                chtls_register_dev(cdev);
                break;
        case CXGB4_STATE_DOWN:
                break;
        case CXGB4_STATE_START_RECOVERY:
                break;
        case CXGB4_STATE_DETACH:
                mutex_lock(&cdev_mutex);
                list_del(&cdev->list);
                mutex_unlock(&cdev_mutex);
                chtls_free_uld(cdev);
                break;
        default:
                break;
        }
        return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
                                          const __be64 *rsp,
                                          u32 pktshift)
{
        struct sk_buff *skb;

        /* Allocate space for the cpl_pass_accept_req which will be
         * synthesized by the driver. Once the driver synthesizes the
         * cpl_pass_accept_req, the skb will go through the regular
         * cpl_pass_accept_req processing in TOM.
         */
        skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
                        - pktshift, GFP_ATOMIC);
        if (unlikely(!skb))
                return NULL;
        __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
                   - pktshift);
        /* For now we copy the cpl_rx_pkt into the skb */
        skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
        skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
                                       gl->va + pktshift,
                                       gl->tot_len - pktshift);

        return skb;
}

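/*
 * Handle a CPL_RX_PKT ingress message: copy the gather list into a
 * linear skb with room for the cpl_pass_accept_req the driver will
 * synthesize, then dispatch it to the opcode's handler.
 */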
static int chtls_recv_packet(struct chtls_dev *cdev,
                             const struct pkt_gl *gl, const __be64 *rsp)
{
        unsigned int opcode = *(u8 *)rsp;
        struct sk_buff *skb;
        int ret;

        skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
        if (!skb)
                return -ENOMEM;

        ret = chtls_handlers[opcode](cdev, skb);
        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);

        return 0;
}

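/*
 * Handle a CPL message delivered directly in the response descriptor
 * (no packet gather list). A per-bin cached skb is reused when it is
 * neither shared nor cloned; otherwise a fresh skb is allocated before
 * the message is copied out and passed to its handler.
 */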
static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
        unsigned long rspq_bin;
        unsigned int opcode;
        struct sk_buff *skb;
        unsigned int len;
        int ret;

        len = 64 - sizeof(struct rsp_ctrl) - 8;
        opcode = *(u8 *)rsp;

        rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
        skb = cdev->rspq_skb_cache[rspq_bin];
        if (skb && !skb_is_nonlinear(skb) &&
            !skb_shared(skb) && !skb_cloned(skb)) {
                refcount_inc(&skb->users);
                if (refcount_read(&skb->users) == 2) {
                        __skb_trim(skb, 0);
                        if (skb_tailroom(skb) >= len)
                                goto copy_out;
                }
                refcount_dec(&skb->users);
        }
        skb = alloc_skb(len, GFP_ATOMIC);
        if (unlikely(!skb))
                return -ENOMEM;

copy_out:
        __skb_put(skb, len);
        skb_copy_to_linear_data(skb, rsp, len);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        ret = chtls_handlers[opcode](cdev, skb);

        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);
        return 0;
}

static void chtls_recv(struct chtls_dev *cdev,
                       struct sk_buff **skbs, const __be64 *rsp)
{
        struct sk_buff *skb = *skbs;
        unsigned int opcode;
        int ret;

        opcode = *(u8 *)rsp;

        __skb_push(skb, sizeof(struct rss_header));
        skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

        ret = chtls_handlers[opcode](cdev, skb);
        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);
}

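/*
 * cxgb4 ULD ->rx_handler() callback: demultiplex incoming messages.
 * CPL_RX_PKT is copied and reframed for passive-open processing, inline
 * responses without a gather list go through chtls_recv_rsp(), and
 * everything else is converted to an skb and handed to chtls_recv().
 */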
static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
                                const struct pkt_gl *gl)
{
        struct chtls_dev *cdev = handle;
        unsigned int opcode;
        struct sk_buff *skb;

        opcode = *(u8 *)rsp;

        if (unlikely(opcode == CPL_RX_PKT)) {
                if (chtls_recv_packet(cdev, gl, rsp) < 0)
                        goto nomem;
                return 0;
        }

        if (!gl)
                return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
        skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
        if (unlikely(!skb))
                goto nomem;
        chtls_recv(cdev, &skb, rsp);
        return 0;

nomem:
        return -ENOMEM;
}

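/*
 * SOL_TLS getsockopt: the offload path only handles TLS 1.2 (see
 * do_chtls_setsockopt() below), so report a tls_crypto_info with
 * version TLS_1_2_VERSION and leave the remaining fields zeroed.
 */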
static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
                               int __user *optlen)
{
        struct tls_crypto_info crypto_info = { 0 };

        crypto_info.version = TLS_1_2_VERSION;
        if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
                return -EFAULT;
        return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, int __user *optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->getsockopt(sk, level, optname, optval, optlen);

        return do_chtls_getsockopt(sk, optval, optlen);
}

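/*
 * SOL_TLS setsockopt: validate the user-supplied tls_crypto_info (only
 * TLS 1.2 with AES-GCM-128 is accepted), copy the full cipher
 * parameters into the per-connection TLS state, and hand the key
 * material to chtls_setkey() for the direction selected by optname.
 */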
static int do_chtls_setsockopt(struct sock *sk, int optname,
                               char __user *optval, unsigned int optlen)
{
        struct tls_crypto_info *crypto_info, tmp_crypto_info;
        struct chtls_sock *csk;
        int keylen;
        int rc = 0;

        csk = rcu_dereference_sk_user_data(sk);

        if (!optval || optlen < sizeof(*crypto_info)) {
                rc = -EINVAL;
                goto out;
        }

        rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        /* check version */
        if (tmp_crypto_info.version != TLS_1_2_VERSION) {
                rc = -ENOTSUPP;
                goto out;
        }

        crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

        switch (tmp_crypto_info.cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                /* Obtain version and type from previous copy */
                crypto_info[0] = tmp_crypto_info;
                /* Now copy the following data */
                rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
                                optval + sizeof(*crypto_info),
                                sizeof(struct tls12_crypto_info_aes_gcm_128)
                                - sizeof(*crypto_info));

                if (rc) {
                        rc = -EFAULT;
                        goto out;
                }

                keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
                rc = chtls_setkey(csk, keylen, optname);
                break;
        }
        default:
                rc = -EINVAL;
                goto out;
        }
out:
        return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, unsigned int optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->setsockopt(sk, level, optname, optval, optlen);

        return do_chtls_setsockopt(sk, optname, optval, optlen);
}

static struct cxgb4_uld_info chtls_uld_info = {
        .name = DRV_NAME,
        .nrxq = MAX_ULD_QSETS,
        .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .add = chtls_uld_add,
        .state_change = chtls_uld_state_change,
        .rx_handler = chtls_uld_rx_handler,
};

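/*
 * Switch an offloaded socket's proto ops to the chtls CPL-aware variant
 * initialized in chtls_init_ulp_ops() below.
 */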
void chtls_install_cpl_ops(struct sock *sk)
{
        sk->sk_prot = &chtls_cpl_prot;
}

static void __init chtls_init_ulp_ops(void)
{
        chtls_cpl_prot                  = tcp_prot;
        chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
                           &tcp_prot, PF_INET);
        chtls_cpl_prot.close            = chtls_close;
        chtls_cpl_prot.disconnect       = chtls_disconnect;
        chtls_cpl_prot.destroy          = chtls_destroy_sock;
        chtls_cpl_prot.shutdown         = chtls_shutdown;
        chtls_cpl_prot.sendmsg          = chtls_sendmsg;
        chtls_cpl_prot.sendpage         = chtls_sendpage;
        chtls_cpl_prot.recvmsg          = chtls_recvmsg;
        chtls_cpl_prot.setsockopt       = chtls_setsockopt;
        chtls_cpl_prot.getsockopt       = chtls_getsockopt;
}

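/*
 * Module init/exit: set up the CPL proto ops, hook the listen notifier,
 * and register (or unregister) with cxgb4 as the CXGB4_ULD_TLS upper
 * layer driver.
 */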
static int __init chtls_register(void)
{
        chtls_init_ulp_ops();
        register_listen_notifier(&listen_notifier);
        cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
        return 0;
}

static void __exit chtls_unregister(void)
{
        unregister_listen_notifier(&listen_notifier);
        chtls_free_all_uld();
        cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);